From cb51ec9b355f38fe1c604900a10c4728deea5005 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 5 May 2025 17:12:35 +0100 Subject: [PATCH 001/247] docs: [#1495] improve torrent-repository pkg readme --- packages/torrent-repository/README.md | 30 +++++++++------------------ 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/packages/torrent-repository/README.md b/packages/torrent-repository/README.md index ffc71f1d7..a8c55746b 100644 --- a/packages/torrent-repository/README.md +++ b/packages/torrent-repository/README.md @@ -2,26 +2,16 @@ A library to provide a torrent repository to the [Torrust Tracker](https://github.com/torrust/torrust-tracker). -## Benchmarking - -```console -cargo bench -p torrust-tracker-torrent-repository -``` - -Example partial output: - -```output - Running benches/repository_benchmark.rs (target/release/deps/repository_benchmark-a9b0013c8d09c3c3) -add_one_torrent/RwLockStd - time: [63.057 ns 63.242 ns 63.506 ns] -Found 12 outliers among 100 measurements (12.00%) - 2 (2.00%) low severe - 2 (2.00%) low mild - 2 (2.00%) high mild - 6 (6.00%) high severe -add_one_torrent/RwLockStdMutexStd - time: [62.505 ns 63.077 ns 63.817 ns] -``` +Its main responsibilities include: + +- Managing Torrent Entries: It stores, retrieves, and manages torrent entries, which are torrents being tracked. +- Persistence: It supports loading tracked torrents from persistent storage, ensuring that torrent data can be restored across restarts. +- Pagination and sorting: It provides paginated and stable/sorted access to torrent entries. +- Peer management: It manages peers associated with torrents, including removing inactive peers and handling torrents with no peers (peerless torrents). +- Policy handling: It supports different policies for handling torrents, such as persisting, removing, or custom policies for torrents with no peers. +- Metrics: It can provide metrics about the torrents, such as counts or statuses, for monitoring or statistics. 
+ +This repo is a core component for managing the state and lifecycle of torrents and their peers in a BitTorrent tracker, with peer management, and flexible policies. ## Documentation From 15c14c50268c8b4567ab7d26503a11432c17bc9d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 5 May 2025 17:29:21 +0100 Subject: [PATCH 002/247] refactor: [#1495] rename PeerList to Swarm --- packages/torrent-repository/src/entry/mod.rs | 2 +- .../src/entry/{peer_list.rs => swarm.rs} | 112 +++++++++--------- .../torrent-repository/src/entry/torrent.rs | 4 +- packages/torrent-repository/src/repository.rs | 6 +- 4 files changed, 63 insertions(+), 61 deletions(-) rename packages/torrent-repository/src/entry/{peer_list.rs => swarm.rs} (68%) diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index 785672be5..94fdcc58e 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -1,2 +1,2 @@ -pub mod peer_list; +pub mod swarm; pub mod torrent; diff --git a/packages/torrent-repository/src/entry/peer_list.rs b/packages/torrent-repository/src/entry/swarm.rs similarity index 68% rename from packages/torrent-repository/src/entry/peer_list.rs rename to packages/torrent-repository/src/entry/swarm.rs index 33270cf27..0395361a3 100644 --- a/packages/torrent-repository/src/entry/peer_list.rs +++ b/packages/torrent-repository/src/entry/swarm.rs @@ -1,9 +1,11 @@ //! A peer list. +use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::PeerId; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; +use torrust_tracker_primitives::peer::{self, Peer}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; // code-review: the current implementation uses the peer Id as the ``BTreeMap`` // key. That would allow adding two identical peers except for the Id. 
@@ -11,11 +13,11 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; // would be allowed. That would lead to duplicated peers in the tracker responses. #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct PeerList { - peers: std::collections::BTreeMap>, +pub struct Swarm { + peers: BTreeMap>, } -impl PeerList { +impl Swarm { #[must_use] pub fn len(&self) -> usize { self.peers.len() @@ -94,193 +96,193 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::entry::peer_list::PeerList; + use crate::entry::swarm::Swarm; #[test] fn be_empty_when_no_peers_have_been_inserted() { - let peer_list = PeerList::default(); + let swarm = Swarm::default(); - assert!(peer_list.is_empty()); + assert!(swarm.is_empty()); } #[test] fn have_zero_length_when_no_peers_have_been_inserted() { - let peer_list = PeerList::default(); + let swarm = Swarm::default(); - assert_eq!(peer_list.len(), 0); + assert_eq!(swarm.len(), 0); } #[test] fn allow_inserting_a_new_peer() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - assert_eq!(peer_list.upsert(peer.into()), None); + assert_eq!(swarm.upsert(peer.into()), None); } #[test] fn allow_updating_a_preexisting_peer() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); - assert_eq!(peer_list.upsert(peer.into()), Some(Arc::new(peer))); + assert_eq!(swarm.upsert(peer.into()), Some(Arc::new(peer))); } #[test] fn allow_getting_all_peers() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); - assert_eq!(peer_list.get_all(None), [Arc::new(peer)]); + assert_eq!(swarm.get_all(None), 
[Arc::new(peer)]); } #[test] fn allow_getting_one_peer_by_id() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); - assert_eq!(peer_list.get(&peer.peer_id), Some(Arc::new(peer)).as_ref()); + assert_eq!(swarm.get(&peer.peer_id), Some(Arc::new(peer)).as_ref()); } #[test] fn increase_the_number_of_peers_after_inserting_a_new_one() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); - assert_eq!(peer_list.len(), 1); + assert_eq!(swarm.len(), 1); } #[test] fn decrease_the_number_of_peers_after_removing_one() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); - peer_list.remove(&peer.peer_id); + swarm.remove(&peer.peer_id); - assert!(peer_list.is_empty()); + assert!(swarm.is_empty()); } #[test] fn allow_removing_an_existing_peer() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); - peer_list.remove(&peer.peer_id); + swarm.remove(&peer.peer_id); - assert_eq!(peer_list.get(&peer.peer_id), None); + assert_eq!(swarm.get(&peer.peer_id), None); } #[test] fn allow_getting_all_peers_excluding_peers_with_a_given_address() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - peer_list.upsert(peer1.into()); + swarm.upsert(peer1.into()); let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) 
.with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - peer_list.upsert(peer2.into()); + swarm.upsert(peer2.into()); - assert_eq!(peer_list.get_peers_excluding_addr(&peer2.peer_addr, None), [Arc::new(peer1)]); + assert_eq!(swarm.get_peers_excluding_addr(&peer2.peer_addr, None), [Arc::new(peer1)]); } #[test] fn return_the_number_of_seeders_in_the_list() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - peer_list.upsert(seeder.into()); - peer_list.upsert(leecher.into()); + swarm.upsert(seeder.into()); + swarm.upsert(leecher.into()); - let (seeders, _leechers) = peer_list.seeders_and_leechers(); + let (seeders, _leechers) = swarm.seeders_and_leechers(); assert_eq!(seeders, 1); } #[test] fn return_the_number_of_leechers_in_the_list() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - peer_list.upsert(seeder.into()); - peer_list.upsert(leecher.into()); + swarm.upsert(seeder.into()); + swarm.upsert(leecher.into()); - let (_seeders, leechers) = peer_list.seeders_and_leechers(); + let (_seeders, leechers) = swarm.seeders_and_leechers(); assert_eq!(leechers, 1); } #[test] fn remove_inactive_peers() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); // Remove peers not updated since one second after inserting the peer - peer_list.remove_inactive_peers(last_update_time + one_second); + swarm.remove_inactive_peers(last_update_time + one_second); - assert_eq!(peer_list.len(), 0); + 
assert_eq!(swarm.len(), 0); } #[test] fn not_remove_active_peers() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); // Remove peers not updated since one second before inserting the peer. - peer_list.remove_inactive_peers(last_update_time - one_second); + swarm.remove_inactive_peers(last_update_time - one_second); - assert_eq!(peer_list.len(), 1); + assert_eq!(swarm.len(), 1); } #[test] fn allow_inserting_two_identical_peers_except_for_the_id() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer1 = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); - peer_list.upsert(peer1.into()); + swarm.upsert(peer1.into()); let peer2 = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000002")).build(); - peer_list.upsert(peer2.into()); + swarm.upsert(peer2.into()); - assert_eq!(peer_list.len(), 2); + assert_eq!(swarm.len(), 2); } } } diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index 1cc0f7ba2..48f1a2df1 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -8,7 +8,7 @@ use torrust_tracker_primitives::peer::{self}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use super::peer_list::PeerList; +use super::swarm::Swarm; /// A data structure containing all the information about a torrent in the tracker. 
/// @@ -18,7 +18,7 @@ use super::peer_list::PeerList; #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct TrackedTorrent { /// A network of peers that are all trying to download the torrent associated to this entry - pub(crate) swarm: PeerList, + pub(crate) swarm: Swarm, /// The number of peers that have ever completed downloading the torrent associated to this entry pub(crate) downloaded: u32, diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 8e67f2487..0c387071c 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -7,7 +7,7 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use crate::entry::peer_list::PeerList; +use crate::entry::swarm::Swarm; use crate::entry::torrent::TrackedTorrent; use crate::{LockTrackedTorrent, TrackedTorrentHandle}; @@ -53,7 +53,7 @@ impl TorrentRepository { let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { TrackedTorrentHandle::new( TrackedTorrent { - swarm: PeerList::default(), + swarm: Swarm::default(), downloaded: number_of_downloads, } .into(), @@ -237,7 +237,7 @@ impl TorrentRepository { let entry = TrackedTorrentHandle::new( TrackedTorrent { - swarm: PeerList::default(), + swarm: Swarm::default(), downloaded: *completed, } .into(), From 2882705fbab880ae57cebad4944e6d2452eb63fd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 5 May 2025 20:57:27 +0100 Subject: [PATCH 003/247] refactor: [#1495] use SocketAddr as key for peers in Swarm This change prevents duplicate peers with the same address but different IDs, ensuring more accurate peer tracking. 
--- .../tests/server/asserts.rs | 1 + .../tests/server/requests/announce.rs | 5 ++ .../tests/server/v1/contract.rs | 46 +++++++++++--- .../torrent-repository/src/entry/swarm.rs | 62 ++++++++++++------- .../torrent-repository/src/entry/torrent.rs | 2 +- .../tests/common/torrent_peer_builder.rs | 18 +++++- .../torrent-repository/tests/entry/mod.rs | 3 +- packages/tracker-core/src/lib.rs | 8 +-- packages/tracker-core/src/test_helpers.rs | 6 +- 9 files changed, 109 insertions(+), 42 deletions(-) diff --git a/packages/axum-http-tracker-server/tests/server/asserts.rs b/packages/axum-http-tracker-server/tests/server/asserts.rs index 7ab8d93e5..a82014e16 100644 --- a/packages/axum-http-tracker-server/tests/server/asserts.rs +++ b/packages/axum-http-tracker-server/tests/server/asserts.rs @@ -22,6 +22,7 @@ pub fn assert_bencoded_error(response_text: &String, expected_failure_reason: &s ); } +#[allow(dead_code)] pub async fn assert_empty_announce_response(response: Response) { assert_eq!(response.status(), 200); let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); diff --git a/packages/axum-http-tracker-server/tests/server/requests/announce.rs b/packages/axum-http-tracker-server/tests/server/requests/announce.rs index 0775de7e4..5a670b618 100644 --- a/packages/axum-http-tracker-server/tests/server/requests/announce.rs +++ b/packages/axum-http-tracker-server/tests/server/requests/announce.rs @@ -126,6 +126,11 @@ impl QueryBuilder { self } + pub fn with_port(mut self, port: u16) -> Self { + self.announce_query.port = port; + self + } + pub fn without_compact(mut self) -> Self { self.announce_query.compact = None; self diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index 37d96052f..d1f52d55a 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -105,8 
+105,8 @@ mod for_all_config_modes { use crate::common::fixtures::invalid_info_hashes; use crate::server::asserts::{ assert_announce_response, assert_bad_announce_request_error_response, assert_cannot_parse_query_param_error_response, - assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_empty_announce_response, - assert_is_announce_response, assert_missing_query_params_for_announce_request_error_response, + assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_is_announce_response, + assert_missing_query_params_for_announce_request_error_response, }; use crate::server::client::Client; use crate::server::requests::announce::{Compact, QueryBuilder}; @@ -559,7 +559,8 @@ mod for_all_config_modes { } #[tokio::test] - async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_socket_address_even_if_the_peer_id_is_different( + ) { logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -567,19 +568,44 @@ mod for_all_config_modes { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 let peer = PeerBuilder::default().build(); - // Add a peer - env.add_torrent_peer(&info_hash, &peer); - - let announce_query = QueryBuilder::default() + let announce_query_1 = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer.peer_id) + .with_peer_addr(&peer.peer_addr.ip()) + .with_port(peer.peer_addr.port()) + .query(); + + let announce_query_2 = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&PeerId(*b"-qB00000000000000002")) // Different peer ID + .with_peer_addr(&peer.peer_addr.ip()) + .with_port(peer.peer_addr.port()) .query(); - assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); + // Same peer socket address + 
assert_eq!(announce_query_1.peer_addr, announce_query_2.peer_addr); + assert_eq!(announce_query_1.port, announce_query_2.port); + + // Different peer ID + assert_ne!(announce_query_1.peer_id, announce_query_2.peer_id); - let response = Client::new(*env.bind_address()).announce(&announce_query).await; + let _response = Client::new(*env.bind_address()).announce(&announce_query_1).await; + let response = Client::new(*env.bind_address()).announce(&announce_query_2).await; - assert_empty_announce_response(response).await; + let announce_policy = env.container.tracker_core_container.core_config.announce_policy; + + // The response should contain only the first peer. + assert_announce_response( + response, + &Announce { + complete: 1, + incomplete: 0, + interval: announce_policy.interval, + min_interval: announce_policy.interval_min, + peers: vec![], + }, + ) + .await; env.stop().await; } diff --git a/packages/torrent-repository/src/entry/swarm.rs b/packages/torrent-repository/src/entry/swarm.rs index 0395361a3..d6a7df102 100644 --- a/packages/torrent-repository/src/entry/swarm.rs +++ b/packages/torrent-repository/src/entry/swarm.rs @@ -3,18 +3,12 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use aquatic_udp_protocol::PeerId; use torrust_tracker_primitives::peer::{self, Peer}; use torrust_tracker_primitives::DurationSinceUnixEpoch; -// code-review: the current implementation uses the peer Id as the ``BTreeMap`` -// key. That would allow adding two identical peers except for the Id. -// For example, two peers with the same socket address but a different peer Id -// would be allowed. That would lead to duplicated peers in the tracker responses. 
- #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Swarm { - peers: BTreeMap>, + peers: BTreeMap>, } impl Swarm { @@ -28,12 +22,12 @@ impl Swarm { self.peers.is_empty() } - pub fn upsert(&mut self, value: Arc) -> Option> { - self.peers.insert(value.peer_id, value) + pub fn upsert(&mut self, peer: Arc) -> Option> { + self.peers.insert(peer.peer_addr, peer) } - pub fn remove(&mut self, key: &PeerId) -> Option> { - self.peers.remove(key) + pub fn remove(&mut self, peer: &Peer) -> Option> { + self.peers.remove(&peer.peer_addr) } pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { @@ -42,12 +36,12 @@ impl Swarm { } #[must_use] - pub fn get(&self, peer_id: &PeerId) -> Option<&Arc> { - self.peers.get(peer_id) + pub fn get(&self, peer_addr: &SocketAddr) -> Option<&Arc> { + self.peers.get(peer_addr) } #[must_use] - pub fn get_all(&self, limit: Option) -> Vec> { + pub fn get_all(&self, limit: Option) -> Vec> { match limit { Some(limit) => self.peers.values().take(limit).cloned().collect(), None => self.peers.values().cloned().collect(), @@ -151,7 +145,7 @@ mod tests { swarm.upsert(peer.into()); - assert_eq!(swarm.get(&peer.peer_id), Some(Arc::new(peer)).as_ref()); + assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); } #[test] @@ -173,7 +167,7 @@ mod tests { swarm.upsert(peer.into()); - swarm.remove(&peer.peer_id); + swarm.remove(&peer); assert!(swarm.is_empty()); } @@ -186,9 +180,9 @@ mod tests { swarm.upsert(peer.into()); - swarm.remove(&peer.peer_id); + swarm.remove(&peer); - assert_eq!(swarm.get(&peer.peer_id), None); + assert_eq!(swarm.get(&peer.peer_addr), None); } #[test] @@ -273,16 +267,42 @@ mod tests { } #[test] - fn allow_inserting_two_identical_peers_except_for_the_id() { + fn allow_inserting_two_identical_peers_except_for_the_socket_address() { let mut swarm = Swarm::default(); - let peer1 = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); + let peer1 
= PeerBuilder::default() + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); swarm.upsert(peer1.into()); - let peer2 = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000002")).build(); + let peer2 = PeerBuilder::default() + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) + .build(); swarm.upsert(peer2.into()); assert_eq!(swarm.len(), 2); } + + #[test] + fn not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { + let mut swarm = Swarm::default(); + + // When that happens the peer ID will be changed in the swarm. + // In practice, it's like if the peer had changed its ID. + + let peer1 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + swarm.upsert(peer1.into()); + + let peer2 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + swarm.upsert(peer2.into()); + + assert_eq!(swarm.len(), 1); + } } } diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index 48f1a2df1..b251699ec 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -75,7 +75,7 @@ impl TrackedTorrent { match peer::ReadInfo::get_event(peer) { AnnounceEvent::Stopped => { - drop(self.swarm.remove(&peer::ReadInfo::get_id(peer))); + drop(self.swarm.remove(peer)); } AnnounceEvent::Completed => { let previous = self.swarm.upsert(Arc::new(*peer)); diff --git a/packages/torrent-repository/tests/common/torrent_peer_builder.rs b/packages/torrent-repository/tests/common/torrent_peer_builder.rs index 33120180d..0c065e670 100644 --- a/packages/torrent-repository/tests/common/torrent_peer_builder.rs +++ 
b/packages/torrent-repository/tests/common/torrent_peer_builder.rs @@ -1,4 +1,4 @@ -use std::net::SocketAddr; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use torrust_tracker_clock::clock::Time; @@ -67,24 +67,40 @@ impl TorrentPeerBuilder { /// A torrent seeder is a peer with 0 bytes left to download which /// has not announced it has stopped +#[allow(clippy::cast_sign_loss)] +#[allow(clippy::cast_possible_truncation)] #[must_use] pub fn a_completed_peer(id: i32) -> peer::Peer { let peer_id = peer::Id::new(id); + let peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), id as u16); + TorrentPeerBuilder::new() .with_number_of_bytes_left(0) .with_event_completed() .with_peer_id(*peer_id) + .with_peer_address(peer_addr) .into() } /// A torrent leecher is a peer that is not a seeder. /// Leecher: left > 0 OR event = Stopped +/// +/// # Panics +/// +/// This function panics if proved id can't be converted into a valid socket address port. +/// +/// The `id` argument is used to identify the peer in both the `peer_id` and the `peer_addr`. 
+#[allow(clippy::cast_sign_loss)] +#[allow(clippy::cast_possible_truncation)] #[must_use] pub fn a_started_peer(id: i32) -> peer::Peer { let peer_id = peer::Id::new(id); + let peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), id as u16); + TorrentPeerBuilder::new() .with_number_of_bytes_left(1) .with_event_started() .with_peer_id(*peer_id) + .with_peer_address(peer_addr) .into() } diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 27bb5f238..5f958f05c 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -370,8 +370,7 @@ async fn it_should_limit_the_number_of_peers_returned( // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { - let mut peer = a_started_peer(1); - peer.peer_id = *peer::Id::new(peer_number); + let peer = a_started_peer(peer_number); torrent.upsert_peer(&peer); } diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index d9da9b9e7..82ebac3c6 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -224,14 +224,14 @@ mod tests { // Scrape let scrape_data = scrape_handler.scrape(&vec![info_hash]).await.unwrap(); - // The expected swarm metadata for the file + // The expected swarm metadata for the torrent let mut expected_scrape_data = ScrapeData::empty(); expected_scrape_data.add_file( &info_hash, SwarmMetadata { - complete: 0, // the "complete" peer does not count because it was not previously known - downloaded: 0, - incomplete: 1, // the "incomplete" peer we have just announced + complete: 1, // the "incomplete" announced + downloaded: 0, // the "complete" peer download does not count because it was not previously known + incomplete: 1, // the "incomplete" peer announced }, ); diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index 0d7ca012f..04fe4133b 100644 --- 
a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -104,7 +104,7 @@ pub(crate) mod tests { #[must_use] pub fn complete_peer() -> Peer { Peer { - peer_id: PeerId(*b"-qB00000000000000000"), + peer_id: PeerId(*b"-qB00000000000000001"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), @@ -118,8 +118,8 @@ pub(crate) mod tests { #[must_use] pub fn incomplete_peer() -> Peer { Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + peer_id: PeerId(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), downloaded: NumberOfBytes::new(0), From 0a4c8050515825244ee4e62ccea0332deb83a84a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 08:49:46 +0100 Subject: [PATCH 004/247] refactor: [#1495] add SwarmMetadata to Swarm - Moved responsability for keeping metadata to the Swarm type. - Number of seeder and leechers is now calculated when the Swarm changes not on-demand. We avoid iterating over the peers to get the number of seeders and leechers. - The number of downloads is also calculate now in the Swarm. It will be removed from the TrackedTorrent. --- packages/primitives/src/swarm_metadata.rs | 17 +- .../torrent-repository/src/entry/swarm.rs | 652 +++++++++++++----- 2 files changed, 503 insertions(+), 166 deletions(-) diff --git a/packages/primitives/src/swarm_metadata.rs b/packages/primitives/src/swarm_metadata.rs index 792eff632..a70298d71 100644 --- a/packages/primitives/src/swarm_metadata.rs +++ b/packages/primitives/src/swarm_metadata.rs @@ -7,7 +7,7 @@ use derive_more::Constructor; /// Swarm metadata dictionary in the scrape response. 
/// /// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) -#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Constructor)] pub struct SwarmMetadata { /// (i.e `completed`): The number of peers that have ever completed /// downloading a given torrent. @@ -27,6 +27,21 @@ impl SwarmMetadata { pub fn zeroed() -> Self { Self::default() } + + #[must_use] + pub fn downloads(&self) -> u32 { + self.downloaded + } + + #[must_use] + pub fn seeders(&self) -> u32 { + self.complete + } + + #[must_use] + pub fn leechers(&self) -> u32 { + self.incomplete + } } /// Structure that holds aggregate swarm metadata. diff --git a/packages/torrent-repository/src/entry/swarm.rs b/packages/torrent-repository/src/entry/swarm.rs index d6a7df102..7331d4504 100644 --- a/packages/torrent-repository/src/entry/swarm.rs +++ b/packages/torrent-repository/src/entry/swarm.rs @@ -1,14 +1,18 @@ -//! A peer list. +//! A swarm is a collection of peers that are all trying to download the same +//! torrent. use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; +use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_primitives::peer::{self, Peer}; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Swarm { peers: BTreeMap>, + metadata: SwarmMetadata, } impl Swarm { @@ -23,16 +27,82 @@ impl Swarm { } pub fn upsert(&mut self, peer: Arc) -> Option> { - self.peers.insert(peer.peer_addr, peer) + let new_peer_is_seeder = peer.is_seeder(); + let new_peer_completed = peer.event == AnnounceEvent::Completed; + + if let Some(old_peer) = self.peers.insert(peer.peer_addr, peer) { + // A peer has been updated in the swarm. + + // Check if the peer has changed its from leecher to seeder or vice versa. 
+ if old_peer.is_seeder() != new_peer_is_seeder { + if new_peer_is_seeder { + self.metadata.complete += 1; + self.metadata.incomplete -= 1; + } else { + self.metadata.complete -= 1; + self.metadata.incomplete += 1; + } + } + + // Check if the peer has completed downloading the torrent. + if new_peer_completed && old_peer.event != AnnounceEvent::Completed { + self.metadata.downloaded += 1; + } + + Some(old_peer) + } else { + // A new peer has been added to the swarm. + + // Check if the peer is a seeder or a leecher. + if new_peer_is_seeder { + self.metadata.complete += 1; + } else { + self.metadata.incomplete += 1; + } + + // Check if the peer has completed downloading the torrent. + if new_peer_completed { + // Don't increment `downloaded` here: we only count transitions + // from a known peer + } + + None + } } pub fn remove(&mut self, peer: &Peer) -> Option> { - self.peers.remove(&peer.peer_addr) + match self.peers.remove(&peer.peer_addr) { + Some(old_peer) => { + // A peer has been removed from the swarm. + + // Check if the peer was a seeder or a leecher. + if old_peer.is_seeder() { + self.metadata.complete -= 1; + } else { + self.metadata.incomplete -= 1; + } + + Some(old_peer) + } + None => None, + } } pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { - self.peers - .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); + self.peers.retain(|_, peer| { + let is_active = peer::ReadInfo::get_updated(peer) > current_cutoff; + + if !is_active { + // Update the metadata when removing a peer. 
+ if peer.is_seeder() { + self.metadata.complete -= 1; + } else { + self.metadata.incomplete -= 1; + } + } + + is_active + }); } #[must_use] @@ -48,14 +118,6 @@ impl Swarm { } } - #[must_use] - pub fn seeders_and_leechers(&self) -> (usize, usize) { - let seeders = self.peers.values().filter(|peer| peer.is_seeder()).count(); - let leechers = self.len() - seeders; - - (seeders, leechers) - } - #[must_use] pub fn get_peers_excluding_addr(&self, peer_addr: &SocketAddr, limit: Option) -> Vec> { match limit { @@ -77,232 +139,492 @@ impl Swarm { .collect(), } } + + #[must_use] + pub fn metadata(&self) -> SwarmMetadata { + self.metadata + } + + /// Returns the number of seeders and leechers in the swarm. + /// + /// # Panics + /// + /// This function will panic if the `complete` or `incomplete` fields in the + /// `metadata` field cannot be converted to `usize`. + #[must_use] + pub fn seeders_and_leechers(&self) -> (usize, usize) { + let seeders = self + .metadata + .complete + .try_into() + .expect("Failed to convert 'complete' (seeders) count to usize"); + let leechers = self + .metadata + .incomplete + .try_into() + .expect("Failed to convert 'incomplete' (leechers) count to usize"); + + (seeders, leechers) + } } #[cfg(test)] mod tests { - mod it_should { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; - use aquatic_udp_protocol::PeerId; - use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use torrust_tracker_primitives::DurationSinceUnixEpoch; + use aquatic_udp_protocol::PeerId; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::entry::swarm::Swarm; + use crate::entry::swarm::Swarm; - #[test] - fn be_empty_when_no_peers_have_been_inserted() { - let swarm = Swarm::default(); + #[test] + fn 
it_should_be_empty_when_no_peers_have_been_inserted() { + let swarm = Swarm::default(); - assert!(swarm.is_empty()); - } + assert!(swarm.is_empty()); + } - #[test] - fn have_zero_length_when_no_peers_have_been_inserted() { - let swarm = Swarm::default(); + #[test] + fn it_should_have_zero_length_when_no_peers_have_been_inserted() { + let swarm = Swarm::default(); - assert_eq!(swarm.len(), 0); - } + assert_eq!(swarm.len(), 0); + } - #[test] - fn allow_inserting_a_new_peer() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_allow_inserting_a_new_peer() { + let mut swarm = Swarm::default(); - let peer = PeerBuilder::default().build(); + let peer = PeerBuilder::default().build(); - assert_eq!(swarm.upsert(peer.into()), None); - } + assert_eq!(swarm.upsert(peer.into()), None); + } - #[test] - fn allow_updating_a_preexisting_peer() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_allow_updating_a_preexisting_peer() { + let mut swarm = Swarm::default(); - let peer = PeerBuilder::default().build(); + let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.upsert(peer.into()); - assert_eq!(swarm.upsert(peer.into()), Some(Arc::new(peer))); - } + assert_eq!(swarm.upsert(peer.into()), Some(Arc::new(peer))); + } - #[test] - fn allow_getting_all_peers() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_allow_getting_all_peers() { + let mut swarm = Swarm::default(); - let peer = PeerBuilder::default().build(); + let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.upsert(peer.into()); - assert_eq!(swarm.get_all(None), [Arc::new(peer)]); - } + assert_eq!(swarm.get_all(None), [Arc::new(peer)]); + } - #[test] - fn allow_getting_one_peer_by_id() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_allow_getting_one_peer_by_id() { + let mut swarm = Swarm::default(); - let peer = PeerBuilder::default().build(); + let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + 
swarm.upsert(peer.into()); - assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); - } + assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); + } - #[test] - fn increase_the_number_of_peers_after_inserting_a_new_one() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { + let mut swarm = Swarm::default(); - let peer = PeerBuilder::default().build(); + let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.upsert(peer.into()); - assert_eq!(swarm.len(), 1); - } + assert_eq!(swarm.len(), 1); + } - #[test] - fn decrease_the_number_of_peers_after_removing_one() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_decrease_the_number_of_peers_after_removing_one() { + let mut swarm = Swarm::default(); - let peer = PeerBuilder::default().build(); + let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.upsert(peer.into()); - swarm.remove(&peer); + swarm.remove(&peer); - assert!(swarm.is_empty()); - } + assert!(swarm.is_empty()); + } - #[test] - fn allow_removing_an_existing_peer() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_allow_removing_an_existing_peer() { + let mut swarm = Swarm::default(); - let peer = PeerBuilder::default().build(); + let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.upsert(peer.into()); - swarm.remove(&peer); + let old = swarm.remove(&peer); - assert_eq!(swarm.get(&peer.peer_addr), None); - } + assert_eq!(old, Some(Arc::new(peer))); + assert_eq!(swarm.get(&peer.peer_addr), None); + } - #[test] - fn allow_getting_all_peers_excluding_peers_with_a_given_address() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_allow_removing_a_non_existing_peer() { + let mut swarm = Swarm::default(); - let peer1 = PeerBuilder::default() - .with_peer_id(&PeerId(*b"-qB00000000000000001")) - 
.with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) - .build(); - swarm.upsert(peer1.into()); + let peer = PeerBuilder::default().build(); - let peer2 = PeerBuilder::default() - .with_peer_id(&PeerId(*b"-qB00000000000000002")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) - .build(); - swarm.upsert(peer2.into()); + assert_eq!(swarm.remove(&peer), None); + } - assert_eq!(swarm.get_peers_excluding_addr(&peer2.peer_addr, None), [Arc::new(peer1)]); - } + #[test] + fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { + let mut swarm = Swarm::default(); - #[test] - fn return_the_number_of_seeders_in_the_list() { - let mut swarm = Swarm::default(); + let peer1 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + swarm.upsert(peer1.into()); - let seeder = PeerBuilder::seeder().build(); - let leecher = PeerBuilder::leecher().build(); + let peer2 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) + .build(); + swarm.upsert(peer2.into()); - swarm.upsert(seeder.into()); - swarm.upsert(leecher.into()); + assert_eq!(swarm.get_peers_excluding_addr(&peer2.peer_addr, None), [Arc::new(peer1)]); + } - let (seeders, _leechers) = swarm.seeders_and_leechers(); + #[test] + fn it_should_remove_inactive_peers() { + let mut swarm = Swarm::default(); + let one_second = DurationSinceUnixEpoch::new(1, 0); - assert_eq!(seeders, 1); - } + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + swarm.upsert(peer.into()); + + // Remove peers not updated since one second after inserting the peer + swarm.remove_inactive_peers(last_update_time + one_second); - #[test] - 
fn return_the_number_of_leechers_in_the_list() { - let mut swarm = Swarm::default(); + assert_eq!(swarm.len(), 0); + } - let seeder = PeerBuilder::seeder().build(); - let leecher = PeerBuilder::leecher().build(); + #[test] + fn it_should_not_remove_active_peers() { + let mut swarm = Swarm::default(); + let one_second = DurationSinceUnixEpoch::new(1, 0); - swarm.upsert(seeder.into()); - swarm.upsert(leecher.into()); + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + swarm.upsert(peer.into()); - let (_seeders, leechers) = swarm.seeders_and_leechers(); + // Remove peers not updated since one second before inserting the peer. + swarm.remove_inactive_peers(last_update_time - one_second); - assert_eq!(leechers, 1); - } + assert_eq!(swarm.len(), 1); + } + + #[test] + fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { + let mut swarm = Swarm::default(); + + let peer1 = PeerBuilder::default() + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + swarm.upsert(peer1.into()); + + let peer2 = PeerBuilder::default() + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) + .build(); + swarm.upsert(peer2.into()); + + assert_eq!(swarm.len(), 2); + } + + #[test] + fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { + let mut swarm = Swarm::default(); + + // When that happens the peer ID will be changed in the swarm. + // In practice, it's like if the peer had changed its ID. 
+ + let peer1 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + swarm.upsert(peer1.into()); + + let peer2 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + swarm.upsert(peer2.into()); + + assert_eq!(swarm.len(), 1); + } + + #[test] + fn it_should_return_the_metadata() { + let mut swarm = Swarm::default(); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + swarm.upsert(seeder.into()); + swarm.upsert(leecher.into()); + + assert_eq!( + swarm.metadata(), + SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 1, + } + ); + } + + #[test] + fn it_should_return_the_number_of_seeders_in_the_list() { + let mut swarm = Swarm::default(); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + swarm.upsert(seeder.into()); + swarm.upsert(leecher.into()); + + let (seeders, _leechers) = swarm.seeders_and_leechers(); + + assert_eq!(seeders, 1); + } + + #[test] + fn it_should_return_the_number_of_leechers_in_the_list() { + let mut swarm = Swarm::default(); - #[test] - fn remove_inactive_peers() { - let mut swarm = Swarm::default(); - let one_second = DurationSinceUnixEpoch::new(1, 0); + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); - // Insert the peer - let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); - let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert(peer.into()); + swarm.upsert(seeder.into()); + swarm.upsert(leecher.into()); - // Remove peers not updated since one second after inserting the peer - swarm.remove_inactive_peers(last_update_time + one_second); + let (_seeders, leechers) = swarm.seeders_and_leechers(); - 
assert_eq!(swarm.len(), 0); + assert_eq!(leechers, 1); + } + + mod updating_the_swarm_metadata { + + mod when_a_new_peer_is_added { + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + + use crate::entry::swarm::Swarm; + + #[test] + fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { + let mut swarm = Swarm::default(); + + let leechers = swarm.metadata().leechers(); + + let leecher = PeerBuilder::leecher().build(); + + swarm.upsert(leecher.into()); + + assert_eq!(swarm.metadata().leechers(), leechers + 1); + } + + #[test] + fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { + let mut swarm = Swarm::default(); + + let seeders = swarm.metadata().seeders(); + + let seeder = PeerBuilder::seeder().build(); + + swarm.upsert(seeder.into()); + + assert_eq!(swarm.metadata().seeders(), seeders + 1); + } + + #[test] + fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( + ) { + let mut swarm = Swarm::default(); + + let downloads = swarm.metadata().downloads(); + + let seeder = PeerBuilder::seeder().build(); + + swarm.upsert(seeder.into()); + + assert_eq!(swarm.metadata().downloads(), downloads); + } } - #[test] - fn not_remove_active_peers() { - let mut swarm = Swarm::default(); - let one_second = DurationSinceUnixEpoch::new(1, 0); + mod when_a_peer_is_removed { + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + + use crate::entry::swarm::Swarm; + + #[test] + fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { + let mut swarm = Swarm::default(); + + let leecher = PeerBuilder::leecher().build(); + + swarm.upsert(leecher.into()); + + let leechers = swarm.metadata().leechers(); + + swarm.remove(&leecher); - // Insert the peer - let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); - let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - 
swarm.upsert(peer.into()); + assert_eq!(swarm.metadata().leechers(), leechers - 1); + } - // Remove peers not updated since one second before inserting the peer. - swarm.remove_inactive_peers(last_update_time - one_second); + #[test] + fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { + let mut swarm = Swarm::default(); - assert_eq!(swarm.len(), 1); + let seeder = PeerBuilder::seeder().build(); + + swarm.upsert(seeder.into()); + + let seeders = swarm.metadata().seeders(); + + swarm.remove(&seeder); + + assert_eq!(swarm.metadata().seeders(), seeders - 1); + } } - #[test] - fn allow_inserting_two_identical_peers_except_for_the_socket_address() { - let mut swarm = Swarm::default(); + mod when_a_peer_is_removed_due_to_inactivity { + use std::time::Duration; + + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + + use crate::entry::swarm::Swarm; + + #[test] + fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { + let mut swarm = Swarm::default(); + + let leecher = PeerBuilder::leecher().build(); - let peer1 = PeerBuilder::default() - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) - .build(); - swarm.upsert(peer1.into()); + swarm.upsert(leecher.into()); - let peer2 = PeerBuilder::default() - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) - .build(); - swarm.upsert(peer2.into()); + let leechers = swarm.metadata().leechers(); - assert_eq!(swarm.len(), 2); + swarm.remove_inactive_peers(leecher.updated + Duration::from_secs(1)); + + assert_eq!(swarm.metadata().leechers(), leechers - 1); + } + + #[test] + fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { + let mut swarm = Swarm::default(); + + let seeder = PeerBuilder::seeder().build(); + + swarm.upsert(seeder.into()); + + let seeders = swarm.metadata().seeders(); + + swarm.remove_inactive_peers(seeder.updated + Duration::from_secs(1)); + + 
assert_eq!(swarm.metadata().seeders(), seeders - 1); + } } - #[test] - fn not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { - let mut swarm = Swarm::default(); + mod for_changes_in_existing_peers { + use aquatic_udp_protocol::NumberOfBytes; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + + use crate::entry::swarm::Swarm; + + #[test] + fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { + let mut swarm = Swarm::default(); + + let mut peer = PeerBuilder::leecher().build(); + + swarm.upsert(peer.into()); + + let leechers = swarm.metadata().leechers(); + let seeders = swarm.metadata().seeders(); + + peer.left = NumberOfBytes::new(0); // Convert to seeder + + swarm.upsert(peer.into()); + + assert_eq!(swarm.metadata().seeders(), seeders + 1); + assert_eq!(swarm.metadata().leechers(), leechers - 1); + } + + #[test] + fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { + let mut swarm = Swarm::default(); + + let mut peer = PeerBuilder::seeder().build(); + + swarm.upsert(peer.into()); + + let leechers = swarm.metadata().leechers(); + let seeders = swarm.metadata().seeders(); + + peer.left = NumberOfBytes::new(10); // Convert to leecher + + swarm.upsert(peer.into()); + + assert_eq!(swarm.metadata().leechers(), leechers + 1); + assert_eq!(swarm.metadata().seeders(), seeders - 1); + } + + #[test] + fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { + let mut swarm = Swarm::default(); + + let mut peer = PeerBuilder::leecher().build(); + + swarm.upsert(peer.into()); + + let downloads = swarm.metadata().downloads(); + + peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; + + swarm.upsert(peer.into()); + + assert_eq!(swarm.metadata().downloads(), downloads + 1); + } + + #[test] + fn 
it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { + let mut swarm = Swarm::default(); + + let mut peer = PeerBuilder::leecher().build(); + + swarm.upsert(peer.into()); + + let downloads = swarm.metadata().downloads(); - // When that happens the peer ID will be changed in the swarm. - // In practice, it's like if the peer had changed its ID. + peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - let peer1 = PeerBuilder::default() - .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) - .build(); - swarm.upsert(peer1.into()); + swarm.upsert(peer.into()); - let peer2 = PeerBuilder::default() - .with_peer_id(&PeerId(*b"-qB00000000000000002")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) - .build(); - swarm.upsert(peer2.into()); + swarm.upsert(peer.into()); - assert_eq!(swarm.len(), 1); + assert_eq!(swarm.metadata().downloads(), downloads + 1); + } } } } From 61560a8bd27a4eedb8d19bde450fce39474fc076 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 11:36:01 +0100 Subject: [PATCH 005/247] chore: add gitignore to torrent-repository pkg --- packages/torrent-repository/.gitignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 packages/torrent-repository/.gitignore diff --git a/packages/torrent-repository/.gitignore b/packages/torrent-repository/.gitignore new file mode 100644 index 000000000..c9907ae11 --- /dev/null +++ b/packages/torrent-repository/.gitignore @@ -0,0 +1 @@ +/.coverage/ From f73c56698c94e2d2e5e177e470c8a9c291ab791e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 16:21:39 +0100 Subject: [PATCH 006/247] refactor: [#1495] some renamings in Swarm type --- .../torrent-repository/src/entry/swarm.rs | 142 +++++++++--------- .../torrent-repository/src/entry/torrent.rs | 10 +- 2 files changed, 76 insertions(+), 76 deletions(-) diff --git 
a/packages/torrent-repository/src/entry/swarm.rs b/packages/torrent-repository/src/entry/swarm.rs index 7331d4504..05c09b68e 100644 --- a/packages/torrent-repository/src/entry/swarm.rs +++ b/packages/torrent-repository/src/entry/swarm.rs @@ -5,37 +5,27 @@ use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::AnnounceEvent; -use torrust_tracker_primitives::peer::{self, Peer}; +use torrust_tracker_primitives::peer::{self, Peer, PeerAnnouncement}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Swarm { - peers: BTreeMap>, + peers: BTreeMap>, metadata: SwarmMetadata, } impl Swarm { - #[must_use] - pub fn len(&self) -> usize { - self.peers.len() - } - - #[must_use] - pub fn is_empty(&self) -> bool { - self.peers.is_empty() - } - - pub fn upsert(&mut self, peer: Arc) -> Option> { - let new_peer_is_seeder = peer.is_seeder(); - let new_peer_completed = peer.event == AnnounceEvent::Completed; + pub fn handle_announce(&mut self, incoming_announce: Arc) -> Option> { + let is_now_seeder = incoming_announce.is_seeder(); + let has_completed = incoming_announce.event == AnnounceEvent::Completed; - if let Some(old_peer) = self.peers.insert(peer.peer_addr, peer) { + if let Some(old_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { // A peer has been updated in the swarm. // Check if the peer has changed its from leecher to seeder or vice versa. - if old_peer.is_seeder() != new_peer_is_seeder { - if new_peer_is_seeder { + if old_announce.is_seeder() != is_now_seeder { + if is_now_seeder { self.metadata.complete += 1; self.metadata.incomplete -= 1; } else { @@ -45,23 +35,23 @@ impl Swarm { } // Check if the peer has completed downloading the torrent. 
- if new_peer_completed && old_peer.event != AnnounceEvent::Completed { + if has_completed && old_announce.event != AnnounceEvent::Completed { self.metadata.downloaded += 1; } - Some(old_peer) + Some(old_announce) } else { // A new peer has been added to the swarm. // Check if the peer is a seeder or a leecher. - if new_peer_is_seeder { + if is_now_seeder { self.metadata.complete += 1; } else { self.metadata.incomplete += 1; } // Check if the peer has completed downloading the torrent. - if new_peer_completed { + if has_completed { // Don't increment `downloaded` here: we only count transitions // from a known peer } @@ -70,8 +60,8 @@ impl Swarm { } } - pub fn remove(&mut self, peer: &Peer) -> Option> { - match self.peers.remove(&peer.peer_addr) { + pub fn remove(&mut self, peer_to_remove: &Peer) -> Option> { + match self.peers.remove(&peer_to_remove.peer_addr) { Some(old_peer) => { // A peer has been removed from the swarm. @@ -88,7 +78,7 @@ impl Swarm { } } - pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + pub fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) { self.peers.retain(|_, peer| { let is_active = peer::ReadInfo::get_updated(peer) > current_cutoff; @@ -111,7 +101,7 @@ impl Swarm { } #[must_use] - pub fn get_all(&self, limit: Option) -> Vec> { + pub fn peers(&self, limit: Option) -> Vec> { match limit { Some(limit) => self.peers.values().take(limit).cloned().collect(), None => self.peers.values().cloned().collect(), @@ -119,7 +109,7 @@ impl Swarm { } #[must_use] - pub fn get_peers_excluding_addr(&self, peer_addr: &SocketAddr, limit: Option) -> Vec> { + pub fn peers_excluding(&self, peer_addr: &SocketAddr, limit: Option) -> Vec> { match limit { Some(limit) => self .peers @@ -166,6 +156,16 @@ impl Swarm { (seeders, leechers) } + + #[must_use] + pub fn len(&self) -> usize { + self.peers.len() + } + + #[must_use] + pub fn is_empty(&self) -> bool { + self.peers.is_empty() + } } #[cfg(test)] @@ -201,7 
+201,7 @@ mod tests { let peer = PeerBuilder::default().build(); - assert_eq!(swarm.upsert(peer.into()), None); + assert_eq!(swarm.handle_announce(peer.into()), None); } #[test] @@ -210,9 +210,9 @@ mod tests { let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); - assert_eq!(swarm.upsert(peer.into()), Some(Arc::new(peer))); + assert_eq!(swarm.handle_announce(peer.into()), Some(Arc::new(peer))); } #[test] @@ -221,9 +221,9 @@ mod tests { let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); - assert_eq!(swarm.get_all(None), [Arc::new(peer)]); + assert_eq!(swarm.peers(None), [Arc::new(peer)]); } #[test] @@ -232,7 +232,7 @@ mod tests { let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); } @@ -243,7 +243,7 @@ mod tests { let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); assert_eq!(swarm.len(), 1); } @@ -254,7 +254,7 @@ mod tests { let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); swarm.remove(&peer); @@ -267,7 +267,7 @@ mod tests { let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); let old = swarm.remove(&peer); @@ -292,15 +292,15 @@ mod tests { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert(peer1.into()); + swarm.handle_announce(peer1.into()); let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.upsert(peer2.into()); + swarm.handle_announce(peer2.into()); - assert_eq!(swarm.get_peers_excluding_addr(&peer2.peer_addr, None), 
[Arc::new(peer1)]); + assert_eq!(swarm.peers_excluding(&peer2.peer_addr, None), [Arc::new(peer1)]); } #[test] @@ -311,10 +311,10 @@ mod tests { // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); // Remove peers not updated since one second after inserting the peer - swarm.remove_inactive_peers(last_update_time + one_second); + swarm.remove_inactive(last_update_time + one_second); assert_eq!(swarm.len(), 0); } @@ -327,10 +327,10 @@ mod tests { // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); // Remove peers not updated since one second before inserting the peer. - swarm.remove_inactive_peers(last_update_time - one_second); + swarm.remove_inactive(last_update_time - one_second); assert_eq!(swarm.len(), 1); } @@ -342,12 +342,12 @@ mod tests { let peer1 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert(peer1.into()); + swarm.handle_announce(peer1.into()); let peer2 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.upsert(peer2.into()); + swarm.handle_announce(peer2.into()); assert_eq!(swarm.len(), 2); } @@ -363,13 +363,13 @@ mod tests { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert(peer1.into()); + swarm.handle_announce(peer1.into()); let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert(peer2.into()); + 
swarm.handle_announce(peer2.into()); assert_eq!(swarm.len(), 1); } @@ -381,8 +381,8 @@ mod tests { let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert(seeder.into()); - swarm.upsert(leecher.into()); + swarm.handle_announce(seeder.into()); + swarm.handle_announce(leecher.into()); assert_eq!( swarm.metadata(), @@ -401,8 +401,8 @@ mod tests { let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert(seeder.into()); - swarm.upsert(leecher.into()); + swarm.handle_announce(seeder.into()); + swarm.handle_announce(leecher.into()); let (seeders, _leechers) = swarm.seeders_and_leechers(); @@ -416,8 +416,8 @@ mod tests { let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert(seeder.into()); - swarm.upsert(leecher.into()); + swarm.handle_announce(seeder.into()); + swarm.handle_announce(leecher.into()); let (_seeders, leechers) = swarm.seeders_and_leechers(); @@ -439,7 +439,7 @@ mod tests { let leecher = PeerBuilder::leecher().build(); - swarm.upsert(leecher.into()); + swarm.handle_announce(leecher.into()); assert_eq!(swarm.metadata().leechers(), leechers + 1); } @@ -452,7 +452,7 @@ mod tests { let seeder = PeerBuilder::seeder().build(); - swarm.upsert(seeder.into()); + swarm.handle_announce(seeder.into()); assert_eq!(swarm.metadata().seeders(), seeders + 1); } @@ -466,7 +466,7 @@ mod tests { let seeder = PeerBuilder::seeder().build(); - swarm.upsert(seeder.into()); + swarm.handle_announce(seeder.into()); assert_eq!(swarm.metadata().downloads(), downloads); } @@ -483,7 +483,7 @@ mod tests { let leecher = PeerBuilder::leecher().build(); - swarm.upsert(leecher.into()); + swarm.handle_announce(leecher.into()); let leechers = swarm.metadata().leechers(); @@ -498,7 +498,7 @@ mod tests { let seeder = PeerBuilder::seeder().build(); - swarm.upsert(seeder.into()); + swarm.handle_announce(seeder.into()); let seeders = 
swarm.metadata().seeders(); @@ -521,11 +521,11 @@ mod tests { let leecher = PeerBuilder::leecher().build(); - swarm.upsert(leecher.into()); + swarm.handle_announce(leecher.into()); let leechers = swarm.metadata().leechers(); - swarm.remove_inactive_peers(leecher.updated + Duration::from_secs(1)); + swarm.remove_inactive(leecher.updated + Duration::from_secs(1)); assert_eq!(swarm.metadata().leechers(), leechers - 1); } @@ -536,11 +536,11 @@ mod tests { let seeder = PeerBuilder::seeder().build(); - swarm.upsert(seeder.into()); + swarm.handle_announce(seeder.into()); let seeders = swarm.metadata().seeders(); - swarm.remove_inactive_peers(seeder.updated + Duration::from_secs(1)); + swarm.remove_inactive(seeder.updated + Duration::from_secs(1)); assert_eq!(swarm.metadata().seeders(), seeders - 1); } @@ -558,14 +558,14 @@ mod tests { let mut peer = PeerBuilder::leecher().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(0); // Convert to seeder - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); assert_eq!(swarm.metadata().seeders(), seeders + 1); assert_eq!(swarm.metadata().leechers(), leechers - 1); @@ -577,14 +577,14 @@ mod tests { let mut peer = PeerBuilder::seeder().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(10); // Convert to leecher - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); assert_eq!(swarm.metadata().leechers(), leechers + 1); assert_eq!(swarm.metadata().seeders(), seeders - 1); @@ -596,13 +596,13 @@ mod tests { let mut peer = PeerBuilder::leecher().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - 
swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); assert_eq!(swarm.metadata().downloads(), downloads + 1); } @@ -613,15 +613,15 @@ mod tests { let mut peer = PeerBuilder::leecher().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); assert_eq!(swarm.metadata().downloads(), downloads + 1); } diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index b251699ec..3a895008f 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -62,12 +62,12 @@ impl TrackedTorrent { #[must_use] pub fn get_peers(&self, limit: Option) -> Vec> { - self.swarm.get_all(limit) + self.swarm.peers(limit) } #[must_use] pub fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { - self.swarm.get_peers_excluding_addr(client, limit) + self.swarm.peers_excluding(client, limit) } pub fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { @@ -78,7 +78,7 @@ impl TrackedTorrent { drop(self.swarm.remove(peer)); } AnnounceEvent::Completed => { - let previous = self.swarm.upsert(Arc::new(*peer)); + let previous = self.swarm.handle_announce(Arc::new(*peer)); // Don't count if peer was not previously known and not already completed. if previous.is_some_and(|p| p.event != AnnounceEvent::Completed) { self.downloaded += 1; @@ -88,7 +88,7 @@ impl TrackedTorrent { _ => { // `Started` event (first announced event) or // `None` event (announcements done at regular intervals). 
- drop(self.swarm.upsert(Arc::new(*peer))); + drop(self.swarm.handle_announce(Arc::new(*peer))); } } @@ -96,6 +96,6 @@ } pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { - self.swarm.remove_inactive_peers(current_cutoff); + self.swarm.remove_inactive(current_cutoff); } } From 82bbfe3fcfa2768d527efbddff26ec44e0bee136 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 17:11:23 +0100 Subject: [PATCH 007/247] refactor: [#1495] move logic from TrackedTorrent to Swarm --- .../torrent-repository/src/entry/swarm.rs | 118 ++++++++++++------ .../torrent-repository/src/entry/torrent.rs | 54 ++++---- packages/torrent-repository/src/repository.rs | 10 +- .../tests/common/torrent.rs | 16 +-- .../tests/repository/mod.rs | 24 ++-- packages/tracker-core/src/announce_handler.rs | 2 +- packages/tracker-core/src/torrent/services.rs | 2 +- 7 files changed, 128 insertions(+), 98 deletions(-) diff --git a/packages/torrent-repository/src/entry/swarm.rs b/packages/torrent-repository/src/entry/swarm.rs index 05c09b68e..5d97655ea 100644 --- a/packages/torrent-repository/src/entry/swarm.rs +++ b/packages/torrent-repository/src/entry/swarm.rs @@ -16,7 +16,20 @@ pub struct Swarm { } impl Swarm { - pub fn handle_announce(&mut self, incoming_announce: Arc) -> Option> { + pub fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) -> bool { + let mut downloads_increased: bool = false; + + let _previous_peer = match peer::ReadInfo::get_event(incoming_announce) { + AnnounceEvent::Started | AnnounceEvent::None | AnnounceEvent::Completed => { + self.upsert_peer(Arc::new(*incoming_announce), &mut downloads_increased) + } + AnnounceEvent::Stopped => self.remove(incoming_announce), + }; + + downloads_increased + } + + pub fn upsert_peer(&mut self, incoming_announce: Arc, downloads_increased: &mut bool) -> Option> { let is_now_seeder = incoming_announce.is_seeder(); let has_completed = incoming_announce.event ==
AnnounceEvent::Completed; @@ -37,6 +50,7 @@ impl Swarm { // Check if the peer has completed downloading the torrent. if has_completed && old_announce.event != AnnounceEvent::Completed { self.metadata.downloaded += 1; + *downloads_increased = true; } Some(old_announce) @@ -198,30 +212,33 @@ mod tests { #[test] fn it_should_allow_inserting_a_new_peer() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - assert_eq!(swarm.handle_announce(peer.into()), None); + assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased), None); } #[test] fn it_should_allow_updating_a_preexisting_peer() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); - assert_eq!(swarm.handle_announce(peer.into()), Some(Arc::new(peer))); + assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased), Some(Arc::new(peer))); } #[test] fn it_should_allow_getting_all_peers() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); assert_eq!(swarm.peers(None), [Arc::new(peer)]); } @@ -229,10 +246,11 @@ mod tests { #[test] fn it_should_allow_getting_one_peer_by_id() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); } @@ -240,10 +258,11 @@ mod tests { #[test] fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - 
swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); assert_eq!(swarm.len(), 1); } @@ -251,10 +270,11 @@ mod tests { #[test] fn it_should_decrease_the_number_of_peers_after_removing_one() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); swarm.remove(&peer); @@ -264,10 +284,11 @@ mod tests { #[test] fn it_should_allow_removing_an_existing_peer() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); let old = swarm.remove(&peer); @@ -287,18 +308,19 @@ mod tests { #[test] fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.handle_announce(peer1.into()); + swarm.upsert_peer(peer1.into(), &mut downloads_increased); let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.handle_announce(peer2.into()); + swarm.upsert_peer(peer2.into(), &mut downloads_increased); assert_eq!(swarm.peers_excluding(&peer2.peer_addr, None), [Arc::new(peer1)]); } @@ -306,12 +328,13 @@ mod tests { #[test] fn it_should_remove_inactive_peers() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = 
PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); // Remove peers not updated since one second after inserting the peer swarm.remove_inactive(last_update_time + one_second); @@ -322,12 +345,13 @@ mod tests { #[test] fn it_should_not_remove_active_peers() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); // Remove peers not updated since one second before inserting the peer. swarm.remove_inactive(last_update_time - one_second); @@ -338,16 +362,17 @@ mod tests { #[test] fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer1 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.handle_announce(peer1.into()); + swarm.upsert_peer(peer1.into(), &mut downloads_increased); let peer2 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.handle_announce(peer2.into()); + swarm.upsert_peer(peer2.into(), &mut downloads_increased); assert_eq!(swarm.len(), 2); } @@ -355,6 +380,7 @@ mod tests { #[test] fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; // When that happens the peer ID will be changed in the swarm. // In practice, it's like if the peer had changed its ID. 
@@ -363,13 +389,13 @@ mod tests { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.handle_announce(peer1.into()); + swarm.upsert_peer(peer1.into(), &mut downloads_increased); let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.handle_announce(peer2.into()); + swarm.upsert_peer(peer2.into(), &mut downloads_increased); assert_eq!(swarm.len(), 1); } @@ -377,12 +403,13 @@ mod tests { #[test] fn it_should_return_the_metadata() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.handle_announce(seeder.into()); - swarm.handle_announce(leecher.into()); + swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(leecher.into(), &mut downloads_increased); assert_eq!( swarm.metadata(), @@ -397,12 +424,13 @@ mod tests { #[test] fn it_should_return_the_number_of_seeders_in_the_list() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.handle_announce(seeder.into()); - swarm.handle_announce(leecher.into()); + swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(leecher.into(), &mut downloads_increased); let (seeders, _leechers) = swarm.seeders_and_leechers(); @@ -412,12 +440,13 @@ mod tests { #[test] fn it_should_return_the_number_of_leechers_in_the_list() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.handle_announce(seeder.into()); - swarm.handle_announce(leecher.into()); + swarm.upsert_peer(seeder.into(), &mut downloads_increased); + 
swarm.upsert_peer(leecher.into(), &mut downloads_increased); let (_seeders, leechers) = swarm.seeders_and_leechers(); @@ -434,12 +463,13 @@ mod tests { #[test] fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let leechers = swarm.metadata().leechers(); let leecher = PeerBuilder::leecher().build(); - swarm.handle_announce(leecher.into()); + swarm.upsert_peer(leecher.into(), &mut downloads_increased); assert_eq!(swarm.metadata().leechers(), leechers + 1); } @@ -447,12 +477,13 @@ mod tests { #[test] fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let seeders = swarm.metadata().seeders(); let seeder = PeerBuilder::seeder().build(); - swarm.handle_announce(seeder.into()); + swarm.upsert_peer(seeder.into(), &mut downloads_increased); assert_eq!(swarm.metadata().seeders(), seeders + 1); } @@ -461,12 +492,13 @@ mod tests { fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( ) { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let downloads = swarm.metadata().downloads(); let seeder = PeerBuilder::seeder().build(); - swarm.handle_announce(seeder.into()); + swarm.upsert_peer(seeder.into(), &mut downloads_increased); assert_eq!(swarm.metadata().downloads(), downloads); } @@ -480,10 +512,11 @@ mod tests { #[test] fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); - swarm.handle_announce(leecher.into()); + swarm.upsert_peer(leecher.into(), &mut downloads_increased); let leechers = swarm.metadata().leechers(); @@ -495,10 +528,11 @@ mod tests { #[test] fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { let 
mut swarm = Swarm::default(); + let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); - swarm.handle_announce(seeder.into()); + swarm.upsert_peer(seeder.into(), &mut downloads_increased); let seeders = swarm.metadata().seeders(); @@ -518,10 +552,11 @@ mod tests { #[test] fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); - swarm.handle_announce(leecher.into()); + swarm.upsert_peer(leecher.into(), &mut downloads_increased); let leechers = swarm.metadata().leechers(); @@ -533,10 +568,11 @@ mod tests { #[test] fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); - swarm.handle_announce(seeder.into()); + swarm.upsert_peer(seeder.into(), &mut downloads_increased); let seeders = swarm.metadata().seeders(); @@ -555,17 +591,18 @@ mod tests { #[test] fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(0); // Convert to seeder - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); assert_eq!(swarm.metadata().seeders(), seeders + 1); assert_eq!(swarm.metadata().leechers(), leechers - 1); @@ -574,17 +611,18 @@ mod tests { #[test] fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let mut peer = 
PeerBuilder::seeder().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(10); // Convert to leecher - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); assert_eq!(swarm.metadata().leechers(), leechers + 1); assert_eq!(swarm.metadata().seeders(), seeders - 1); @@ -593,16 +631,17 @@ mod tests { #[test] fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); assert_eq!(swarm.metadata().downloads(), downloads + 1); } @@ -610,18 +649,19 @@ mod tests { #[test] fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); assert_eq!(swarm.metadata().downloads(), downloads + 1); } diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index 3a895008f..b92ca5243 100644 --- 
a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -2,7 +2,6 @@ use std::fmt::Debug; use std::net::SocketAddr; use std::sync::Arc; -use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::{self}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; @@ -10,35 +9,41 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::swarm::Swarm; -/// A data structure containing all the information about a torrent in the tracker. +/// A data structure containing all the information about a torrent in the +/// tracker. /// /// This is the tracker entry for a given torrent and contains the swarm data, /// that's the list of all the peers trying to download the same torrent. +/// /// The tracker keeps one entry like this for every torrent. #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct TrackedTorrent { - /// A network of peers that are all trying to download the torrent associated to this entry + /// A network of peers that are all trying to download the torrent. pub(crate) swarm: Swarm, - /// The number of peers that have ever completed downloading the torrent associated to this entry + /// The number of peers that have ever completed downloading the torrent. + /// This value can be persisted, so it's loaded from the database when + /// the tracker starts.
pub(crate) downloaded: u32, } impl TrackedTorrent { - #[allow(clippy::cast_possible_truncation)] #[must_use] pub fn get_swarm_metadata(&self) -> SwarmMetadata { - let (seeders, leechers) = self.swarm.seeders_and_leechers(); + let metadata = self.swarm.metadata(); SwarmMetadata { downloaded: self.downloaded, - complete: seeders as u32, - incomplete: leechers as u32, + complete: metadata.complete, + incomplete: metadata.incomplete, } } + /// Returns true if the torrent meets the retention policy, meaning that + /// it should be kept in the tracker. #[must_use] pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + // code-review: why? if policy.persistent_torrent_completed_stat && self.downloaded > 0 { return true; } @@ -51,17 +56,17 @@ impl TrackedTorrent { } #[must_use] - pub fn peers_is_empty(&self) -> bool { + pub fn swarm_is_empty(&self) -> bool { self.swarm.is_empty() } #[must_use] - pub fn get_peers_len(&self) -> usize { + pub fn swarm_len(&self) -> usize { self.swarm.len() } #[must_use] - pub fn get_peers(&self, limit: Option) -> Vec> { + pub fn swarm_peers(&self, limit: Option) -> Vec> { self.swarm.peers(limit) } @@ -70,29 +75,14 @@ impl TrackedTorrent { self.swarm.peers_excluding(client, limit) } - pub fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { - let mut number_of_downloads_increased: bool = false; - - match peer::ReadInfo::get_event(peer) { - AnnounceEvent::Stopped => { - drop(self.swarm.remove(peer)); - } - AnnounceEvent::Completed => { - let previous = self.swarm.handle_announce(Arc::new(*peer)); - // Don't count if peer was not previously known and not already completed. - if previous.is_some_and(|p| p.event != AnnounceEvent::Completed) { - self.downloaded += 1; - number_of_downloads_increased = true; - } - } - _ => { - // `Started` event (first announced event) or - // `None` event (announcements done at regular intervals).
- drop(self.swarm.handle_announce(Arc::new(*peer))); - } + pub fn handle_announcement(&mut self, peer: &peer::Peer) -> bool { + let downloads_increased = self.swarm.handle_announcement(peer); + + if downloads_increased { + self.downloaded += 1; } - number_of_downloads_increased + downloads_increased } pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 0c387071c..69bfcf17b 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -46,7 +46,7 @@ impl TorrentRepository { if let Some(existing_entry) = self.torrents.get(info_hash) { tracing::debug!("Torrent already exists: {:?}", info_hash); - existing_entry.value().lock_or_panic().upsert_peer(peer) + existing_entry.value().lock_or_panic().handle_announcement(peer) } else { tracing::debug!("Inserting new torrent: {:?}", info_hash); @@ -66,7 +66,7 @@ impl TorrentRepository { let mut torrent_guard = inserted_entry.value().lock_or_panic(); - torrent_guard.upsert_peer(peer) + torrent_guard.handle_announcement(peer) } } @@ -202,7 +202,7 @@ impl TorrentRepository { pub fn get_torrent_peers(&self, info_hash: &InfoHash, limit: usize) -> Vec> { match self.get(info_hash) { None => vec![], - Some(entry) => entry.lock_or_panic().get_peers(Some(limit)), + Some(entry) => entry.lock_or_panic().swarm_peers(Some(limit)), } } @@ -573,8 +573,8 @@ mod tests { let torrent_entry_info = TorrentEntryInfo { swarm_metadata: torrent_guard.get_swarm_metadata(), - peers: torrent_guard.get_peers(None).iter().map(|peer| *peer.clone()).collect(), - number_of_peers: torrent_guard.get_peers_len(), + peers: torrent_guard.swarm_peers(None).iter().map(|peer| *peer.clone()).collect(), + number_of_peers: torrent_guard.swarm_len(), }; drop(torrent_guard); diff --git a/packages/torrent-repository/tests/common/torrent.rs 
b/packages/torrent-repository/tests/common/torrent.rs index ffa3c6d71..f8be53361 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -29,22 +29,22 @@ impl Torrent { pub(crate) fn peers_is_empty(&self) -> bool { match self { - Torrent::Single(entry) => entry.peers_is_empty(), - Torrent::MutexStd(entry) => entry.lock_or_panic().peers_is_empty(), + Torrent::Single(entry) => entry.swarm_is_empty(), + Torrent::MutexStd(entry) => entry.lock_or_panic().swarm_is_empty(), } } pub(crate) fn get_peers_len(&self) -> usize { match self { - Torrent::Single(entry) => entry.get_peers_len(), - Torrent::MutexStd(entry) => entry.lock_or_panic().get_peers_len(), + Torrent::Single(entry) => entry.swarm_len(), + Torrent::MutexStd(entry) => entry.lock_or_panic().swarm_len(), } } pub(crate) fn get_peers(&self, limit: Option) -> Vec> { match self { - Torrent::Single(entry) => entry.get_peers(limit), - Torrent::MutexStd(entry) => entry.lock_or_panic().get_peers(limit), + Torrent::Single(entry) => entry.swarm_peers(limit), + Torrent::MutexStd(entry) => entry.lock_or_panic().swarm_peers(limit), } } @@ -57,8 +57,8 @@ impl Torrent { pub(crate) fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { match self { - Torrent::Single(entry) => entry.upsert_peer(peer), - Torrent::MutexStd(entry) => entry.lock_or_panic().upsert_peer(peer), + Torrent::Single(entry) => entry.handle_announcement(peer), + Torrent::MutexStd(entry) => entry.lock_or_panic().handle_announcement(peer), } } diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 9701fc53d..40dcff6db 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -34,14 +34,14 @@ fn default() -> Entries { #[fixture] fn started() -> Entries { let mut torrent = TrackedTorrent::default(); - torrent.upsert_peer(&a_started_peer(1)); + 
torrent.handle_announcement(&a_started_peer(1)); vec![(InfoHash::default(), torrent)] } #[fixture] fn completed() -> Entries { let mut torrent = TrackedTorrent::default(); - torrent.upsert_peer(&a_completed_peer(2)); + torrent.handle_announcement(&a_completed_peer(2)); vec![(InfoHash::default(), torrent)] } @@ -49,10 +49,10 @@ fn completed() -> Entries { fn downloaded() -> Entries { let mut torrent = TrackedTorrent::default(); let mut peer = a_started_peer(3); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes::new(0); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); vec![(InfoHash::default(), torrent)] } @@ -60,21 +60,21 @@ fn downloaded() -> Entries { fn three() -> Entries { let mut started = TrackedTorrent::default(); let started_h = &mut DefaultHasher::default(); - started.upsert_peer(&a_started_peer(1)); + started.handle_announcement(&a_started_peer(1)); started.hash(started_h); let mut completed = TrackedTorrent::default(); let completed_h = &mut DefaultHasher::default(); - completed.upsert_peer(&a_completed_peer(2)); + completed.handle_announcement(&a_completed_peer(2)); completed.hash(completed_h); let mut downloaded = TrackedTorrent::default(); let downloaded_h = &mut DefaultHasher::default(); let mut downloaded_peer = a_started_peer(3); - downloaded.upsert_peer(&downloaded_peer); + downloaded.handle_announcement(&downloaded_peer); downloaded_peer.event = AnnounceEvent::Completed; downloaded_peer.left = NumberOfBytes::new(0); - downloaded.upsert_peer(&downloaded_peer); + downloaded.handle_announcement(&downloaded_peer); downloaded.hash(downloaded_h); vec![ @@ -90,7 +90,7 @@ fn many_out_of_order() -> Entries { for i in 0..408 { let mut entry = TrackedTorrent::default(); - entry.upsert_peer(&a_started_peer(i)); + entry.handle_announcement(&a_started_peer(i)); entries.insert((InfoHash::from(&i), entry)); } @@ -105,7 +105,7 @@ fn many_hashed_in_order() -> 
Entries { for i in 0..408 { let mut entry = TrackedTorrent::default(); - entry.upsert_peer(&a_started_peer(i)); + entry.handle_announcement(&a_started_peer(i)); let hash: &mut DefaultHasher = &mut DefaultHasher::default(); hash.write_i32(i); @@ -457,7 +457,7 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: { let lock_tracked_torrent = repo.get(&info_hash).expect("it_should_get_some"); let entry = lock_tracked_torrent.lock_or_panic(); - assert!(entry.get_peers(None).contains(&peer.into())); + assert!(entry.swarm_peers(None).contains(&peer.into())); } // Remove peers that have not been updated since the timeout (120 seconds ago). @@ -469,7 +469,7 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: { let lock_tracked_torrent = repo.get(&info_hash).expect("it_should_get_some"); let entry = lock_tracked_torrent.lock_or_panic(); - assert!(!entry.get_peers(None).contains(&peer.into())); + assert!(!entry.swarm_peers(None).contains(&peer.into())); } } diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 6174190dc..ece0c87e6 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -660,7 +660,7 @@ mod tests { assert_eq!(torrent_entry.lock_or_panic().get_swarm_metadata().downloaded, 1); // It does not persist the peers - assert!(torrent_entry.lock_or_panic().peers_is_empty()); + assert!(torrent_entry.lock_or_panic().swarm_is_empty()); } } diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 37846b4e3..b748cd3a0 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -101,7 +101,7 @@ pub fn get_torrent_info(in_memory_torrent_repository: &Arc Date: Tue, 6 May 2025 17:21:36 +0100 Subject: [PATCH 008/247] refactor: [#1495] make TrackedTorrent fields private --- 
packages/torrent-repository/src/entry/torrent.rs | 9 +++++++-- packages/torrent-repository/src/repository.rs | 16 ++-------------- 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index b92ca5243..25c76c25c 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -19,15 +19,20 @@ use super::swarm::Swarm; #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct TrackedTorrent { /// A network of peers that are all trying to download the torrent. - pub(crate) swarm: Swarm, + swarm: Swarm, /// The number of peers that have ever completed downloading the torrent. /// This value is can be persistent so it's loaded from the database when /// the tracker starts. - pub(crate) downloaded: u32, + downloaded: u32, } impl TrackedTorrent { + #[must_use] + pub fn new(swarm: Swarm, downloaded: u32) -> Self { + Self { swarm, downloaded } + } + #[must_use] pub fn get_swarm_metadata(&self) -> SwarmMetadata { let metadata = self.swarm.metadata(); diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 69bfcf17b..6977893b7 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -51,13 +51,7 @@ impl TorrentRepository { tracing::debug!("Inserting new torrent: {:?}", info_hash); let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { - TrackedTorrentHandle::new( - TrackedTorrent { - swarm: Swarm::default(), - downloaded: number_of_downloads, - } - .into(), - ) + TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::default(), number_of_downloads).into()) } else { TrackedTorrentHandle::default() }; @@ -235,13 +229,7 @@ impl TorrentRepository { continue; } - let entry = TrackedTorrentHandle::new( - TrackedTorrent { - swarm: Swarm::default(), - downloaded: 
*completed, - } - .into(), - ); + let entry = TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::default(), *completed).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. From 3fb117b2b78768d26d9db31df56c6dd59909932e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 17:31:15 +0100 Subject: [PATCH 009/247] refactor: [#1495] initialize number of downloads in Swarm to persisted value --- packages/torrent-repository/src/entry/swarm.rs | 8 ++++++++ packages/torrent-repository/src/repository.rs | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/packages/torrent-repository/src/entry/swarm.rs b/packages/torrent-repository/src/entry/swarm.rs index 5d97655ea..44cdaf7aa 100644 --- a/packages/torrent-repository/src/entry/swarm.rs +++ b/packages/torrent-repository/src/entry/swarm.rs @@ -16,6 +16,14 @@ pub struct Swarm { } impl Swarm { + #[must_use] + pub fn new(downloaded: u32) -> Self { + Self { + peers: BTreeMap::new(), + metadata: SwarmMetadata::new(downloaded, 0, 0), + } + } + pub fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) -> bool { let mut downloads_increased: bool = false; diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 6977893b7..fa3d77f95 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -51,7 +51,7 @@ impl TorrentRepository { tracing::debug!("Inserting new torrent: {:?}", info_hash); let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { - TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::default(), number_of_downloads).into()) + TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(number_of_downloads), number_of_downloads).into()) } else { TrackedTorrentHandle::default() }; From ec597f020e4d0a063ba9979cbe2038396272600c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 17:38:42 
+0100 Subject: [PATCH 010/247] refactor: [#1495] get the number of downloads from Swarm instead of from TrackedTorrent --- packages/torrent-repository/src/entry/torrent.rs | 8 +------- packages/torrent-repository/src/repository.rs | 2 +- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index 25c76c25c..7a31ff5a0 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -35,13 +35,7 @@ impl TrackedTorrent { #[must_use] pub fn get_swarm_metadata(&self) -> SwarmMetadata { - let metadata = self.swarm.metadata(); - - SwarmMetadata { - downloaded: self.downloaded, - complete: metadata.complete, - incomplete: metadata.incomplete, - } + self.swarm.metadata() } /// Returns true if the torrents meets the retention policy, meaning that diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index fa3d77f95..cb64474c8 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -229,7 +229,7 @@ impl TorrentRepository { continue; } - let entry = TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::default(), *completed).into()); + let entry = TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(*completed), *completed).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. 
From 23ce6e4731e617c455a760e586c614e332813881 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 17:41:56 +0100 Subject: [PATCH 011/247] refactor: [#1495] remove unused field in TrackedTorrent --- .../torrent-repository/src/entry/torrent.rs | 19 ++++--------------- packages/torrent-repository/src/repository.rs | 4 ++-- 2 files changed, 6 insertions(+), 17 deletions(-) diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index 7a31ff5a0..c13db59a1 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -20,17 +20,12 @@ pub struct TrackedTorrent { /// A network of peers that are all trying to download the torrent. swarm: Swarm, - - /// The number of peers that have ever completed downloading the torrent. - /// This value is can be persistent so it's loaded from the database when - /// the tracker starts. - downloaded: u32, } impl TrackedTorrent { #[must_use] - pub fn new(swarm: Swarm, downloaded: u32) -> Self { - Self { swarm, downloaded } + pub fn new(swarm: Swarm) -> Self { + Self { swarm } } #[must_use] @@ -43,7 +38,7 @@ impl TrackedTorrent { #[must_use] pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { // code-review: why?
- if policy.persistent_torrent_completed_stat && self.downloaded > 0 { + if policy.persistent_torrent_completed_stat && self.get_swarm_metadata().downloaded > 0 { return true; } @@ -75,13 +70,7 @@ impl TrackedTorrent { } pub fn handle_announcement(&mut self, peer: &peer::Peer) -> bool { - let downloads_increased = self.swarm.handle_announcement(peer); - - if downloads_increased { - self.downloaded += 1; - } - - downloads_increased + self.swarm.handle_announcement(peer) } pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index cb64474c8..babca5f5d 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -51,7 +51,7 @@ impl TorrentRepository { tracing::debug!("Inserting new torrent: {:?}", info_hash); let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { - TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(number_of_downloads), number_of_downloads).into()) + TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(number_of_downloads)).into()) } else { TrackedTorrentHandle::default() }; @@ -229,7 +229,7 @@ impl TorrentRepository { continue; } - let entry = TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(*completed), *completed).into()); + let entry = TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(*completed)).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. 
From ef7292f424158b07789da2d9b883ac7a8853e230 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 17:52:00 +0100 Subject: [PATCH 012/247] refactor: [#1495] move logic from TrackedTorrent to Swarm --- packages/torrent-repository/src/entry/swarm.rs | 17 +++++++++++++++++ .../torrent-repository/src/entry/torrent.rs | 13 +------------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/packages/torrent-repository/src/entry/swarm.rs b/packages/torrent-repository/src/entry/swarm.rs index 44cdaf7aa..eb7aebfe4 100644 --- a/packages/torrent-repository/src/entry/swarm.rs +++ b/packages/torrent-repository/src/entry/swarm.rs @@ -5,6 +5,7 @@ use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::AnnounceEvent; +use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::{self, Peer, PeerAnnouncement}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; @@ -188,6 +189,22 @@ impl Swarm { pub fn is_empty(&self) -> bool { self.peers.is_empty() } + + /// Returns true if the torrents meets the retention policy, meaning that + /// it should be kept in the tracker. + #[must_use] + pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + // code-review: why? + if policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0 { + return true; + } + + if policy.remove_peerless_torrents && self.is_empty() { + return false; + } + + true + } } #[cfg(test)] diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index c13db59a1..44d5f226a 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -33,20 +33,9 @@ impl TrackedTorrent { self.swarm.metadata() } - /// Returns true if the torrents meets the retention policy, meaning that - /// it should be kept in the tracker. 
#[must_use] pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { - // code-review: why? - if policy.persistent_torrent_completed_stat && self.get_swarm_metadata().downloaded > 0 { - return true; - } - - if policy.remove_peerless_torrents && self.swarm.is_empty() { - return false; - } - - true + self.swarm.meets_retaining_policy(policy) } #[must_use] From b6afed5c9f2900d41c02478d73aaa7f53f70b6fa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 17:54:05 +0100 Subject: [PATCH 013/247] refactor: [#1495] rename methods --- .../torrent-repository/src/entry/torrent.rs | 12 +++++----- packages/torrent-repository/src/repository.rs | 16 ++++++------- .../tests/common/torrent.rs | 24 +++++++++---------- .../tests/repository/mod.rs | 6 ++--- packages/tracker-core/src/announce_handler.rs | 4 ++-- packages/tracker-core/src/torrent/manager.rs | 2 +- packages/tracker-core/src/torrent/services.rs | 8 +++---- 7 files changed, 36 insertions(+), 36 deletions(-) diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index 44d5f226a..69a809a37 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -29,7 +29,7 @@ impl TrackedTorrent { } #[must_use] - pub fn get_swarm_metadata(&self) -> SwarmMetadata { + pub fn metadata(&self) -> SwarmMetadata { self.swarm.metadata() } @@ -39,22 +39,22 @@ impl TrackedTorrent { } #[must_use] - pub fn swarm_is_empty(&self) -> bool { + pub fn is_empty(&self) -> bool { self.swarm.is_empty() } #[must_use] - pub fn swarm_len(&self) -> usize { + pub fn len(&self) -> usize { self.swarm.len() } #[must_use] - pub fn swarm_peers(&self, limit: Option) -> Vec> { + pub fn peers(&self, limit: Option) -> Vec> { self.swarm.peers(limit) } #[must_use] - pub fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + pub fn peers_excluding(&self, client: &SocketAddr, limit: Option) -> Vec> { 
self.swarm.peers_excluding(client, limit) } @@ -62,7 +62,7 @@ impl TrackedTorrent { self.swarm.handle_announcement(peer) } - pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + pub fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) { self.swarm.remove_inactive(current_cutoff); } } diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index babca5f5d..1706937fc 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -84,7 +84,7 @@ impl TorrentRepository { /// This function panics if the lock for the entry cannot be obtained. pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { for entry in &self.torrents { - entry.value().lock_or_panic().remove_inactive_peers(current_cutoff); + entry.value().lock_or_panic().remove_inactive(current_cutoff); } } @@ -139,7 +139,7 @@ impl TorrentRepository { pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { self.torrents .get(info_hash) - .map(|entry| entry.value().lock_or_panic().get_swarm_metadata()) + .map(|entry| entry.value().lock_or_panic().metadata()) } /// Retrieves swarm metadata for a given torrent. 
@@ -175,7 +175,7 @@ impl TorrentRepository { pub fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { match self.get(info_hash) { None => vec![], - Some(entry) => entry.lock_or_panic().get_peers_for_client(&peer.peer_addr, Some(limit)), + Some(entry) => entry.lock_or_panic().peers_excluding(&peer.peer_addr, Some(limit)), } } @@ -196,7 +196,7 @@ impl TorrentRepository { pub fn get_torrent_peers(&self, info_hash: &InfoHash, limit: usize) -> Vec> { match self.get(info_hash) { None => vec![], - Some(entry) => entry.lock_or_panic().swarm_peers(Some(limit)), + Some(entry) => entry.lock_or_panic().peers(Some(limit)), } } @@ -255,7 +255,7 @@ impl TorrentRepository { let mut metrics = AggregateSwarmMetadata::default(); for entry in &self.torrents { - let stats = entry.value().lock_or_panic().get_swarm_metadata(); + let stats = entry.value().lock_or_panic().metadata(); metrics.total_complete += u64::from(stats.complete); metrics.total_downloaded += u64::from(stats.downloaded); metrics.total_incomplete += u64::from(stats.incomplete); @@ -560,9 +560,9 @@ mod tests { let torrent_guard = self.lock_or_panic(); let torrent_entry_info = TorrentEntryInfo { - swarm_metadata: torrent_guard.get_swarm_metadata(), - peers: torrent_guard.swarm_peers(None).iter().map(|peer| *peer.clone()).collect(), - number_of_peers: torrent_guard.swarm_len(), + swarm_metadata: torrent_guard.metadata(), + peers: torrent_guard.peers(None).iter().map(|peer| *peer.clone()).collect(), + number_of_peers: torrent_guard.len(), }; drop(torrent_guard); diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index f8be53361..242ffec70 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -15,8 +15,8 @@ pub(crate) enum Torrent { impl Torrent { pub(crate) fn get_stats(&self) -> SwarmMetadata { match self { - Torrent::Single(entry) => 
entry.get_swarm_metadata(), - Torrent::MutexStd(entry) => entry.lock_or_panic().get_swarm_metadata(), + Torrent::Single(entry) => entry.metadata(), + Torrent::MutexStd(entry) => entry.lock_or_panic().metadata(), } } @@ -29,29 +29,29 @@ impl Torrent { pub(crate) fn peers_is_empty(&self) -> bool { match self { - Torrent::Single(entry) => entry.swarm_is_empty(), - Torrent::MutexStd(entry) => entry.lock_or_panic().swarm_is_empty(), + Torrent::Single(entry) => entry.is_empty(), + Torrent::MutexStd(entry) => entry.lock_or_panic().is_empty(), } } pub(crate) fn get_peers_len(&self) -> usize { match self { - Torrent::Single(entry) => entry.swarm_len(), - Torrent::MutexStd(entry) => entry.lock_or_panic().swarm_len(), + Torrent::Single(entry) => entry.len(), + Torrent::MutexStd(entry) => entry.lock_or_panic().len(), } } pub(crate) fn get_peers(&self, limit: Option) -> Vec> { match self { - Torrent::Single(entry) => entry.swarm_peers(limit), - Torrent::MutexStd(entry) => entry.lock_or_panic().swarm_peers(limit), + Torrent::Single(entry) => entry.peers(limit), + Torrent::MutexStd(entry) => entry.lock_or_panic().peers(limit), } } pub(crate) fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { match self { - Torrent::Single(entry) => entry.get_peers_for_client(client, limit), - Torrent::MutexStd(entry) => entry.lock_or_panic().get_peers_for_client(client, limit), + Torrent::Single(entry) => entry.peers_excluding(client, limit), + Torrent::MutexStd(entry) => entry.lock_or_panic().peers_excluding(client, limit), } } @@ -64,8 +64,8 @@ impl Torrent { pub(crate) fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { match self { - Torrent::Single(entry) => entry.remove_inactive_peers(current_cutoff), - Torrent::MutexStd(entry) => entry.lock_or_panic().remove_inactive_peers(current_cutoff), + Torrent::Single(entry) => entry.remove_inactive(current_cutoff), + Torrent::MutexStd(entry) => entry.lock_or_panic().remove_inactive(current_cutoff), 
} } } diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 40dcff6db..783606a40 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -320,7 +320,7 @@ async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: TorrentRep let mut metrics = AggregateSwarmMetadata::default(); for (_, torrent) in entries { - let stats = torrent.get_swarm_metadata(); + let stats = torrent.metadata(); metrics.total_torrents += 1; metrics.total_incomplete += u64::from(stats.incomplete); @@ -457,7 +457,7 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: { let lock_tracked_torrent = repo.get(&info_hash).expect("it_should_get_some"); let entry = lock_tracked_torrent.lock_or_panic(); - assert!(entry.swarm_peers(None).contains(&peer.into())); + assert!(entry.peers(None).contains(&peer.into())); } // Remove peers that have not been updated since the timeout (120 seconds ago). @@ -469,7 +469,7 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: { let lock_tracked_torrent = repo.get(&info_hash).expect("it_should_get_some"); let entry = lock_tracked_torrent.lock_or_panic(); - assert!(!entry.swarm_peers(None).contains(&peer.into())); + assert!(!entry.peers(None).contains(&peer.into())); } } diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index ece0c87e6..fac0a38c8 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -657,10 +657,10 @@ mod tests { .expect("it should be able to get entry"); // It persists the number of completed peers. 
- assert_eq!(torrent_entry.lock_or_panic().get_swarm_metadata().downloaded, 1); + assert_eq!(torrent_entry.lock_or_panic().metadata().downloaded, 1); // It does not persist the peers - assert!(torrent_entry.lock_or_panic().swarm_is_empty()); + assert!(torrent_entry.lock_or_panic().is_empty()); } } diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index ae7c61741..5c8352f11 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -165,7 +165,7 @@ mod tests { .get(&infohash) .unwrap() .lock_or_panic() - .get_swarm_metadata() + .metadata() .downloaded, 1 ); diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index b748cd3a0..a35fd7aed 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -99,9 +99,9 @@ pub fn get_torrent_info(in_memory_torrent_repository: &Arc = vec![]; for (info_hash, torrent_entry) in in_memory_torrent_repository.get_paginated(pagination) { - let stats = torrent_entry.lock_or_panic().get_swarm_metadata(); + let stats = torrent_entry.lock_or_panic().metadata(); basic_infos.push(BasicInfo { info_hash, @@ -184,7 +184,7 @@ pub fn get_torrents(in_memory_torrent_repository: &Arc Date: Tue, 6 May 2025 18:05:59 +0100 Subject: [PATCH 014/247] refactor: [#1495] remove unneeded TrackedTorrent (wrapper over Swarm) --- packages/torrent-repository/src/entry/mod.rs | 1 - .../torrent-repository/src/entry/torrent.rs | 68 ------------------- packages/torrent-repository/src/lib.rs | 10 +-- packages/torrent-repository/src/repository.rs | 5 +- .../tests/common/torrent.rs | 2 +- .../torrent-repository/tests/entry/mod.rs | 2 +- .../tests/repository/mod.rs | 30 ++++---- 7 files changed, 24 insertions(+), 94 deletions(-) delete mode 100644 packages/torrent-repository/src/entry/torrent.rs diff --git a/packages/torrent-repository/src/entry/mod.rs 
b/packages/torrent-repository/src/entry/mod.rs index 94fdcc58e..899c10d57 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -1,2 +1 @@ pub mod swarm; -pub mod torrent; diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs deleted file mode 100644 index 69a809a37..000000000 --- a/packages/torrent-repository/src/entry/torrent.rs +++ /dev/null @@ -1,68 +0,0 @@ -use std::fmt::Debug; -use std::net::SocketAddr; -use std::sync::Arc; - -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::peer::{self}; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::DurationSinceUnixEpoch; - -use super::swarm::Swarm; - -/// A data structure containing all the information about a torrent in the -/// tracker. -/// -/// This is the tracker entry for a given torrent and contains the swarm data, -/// that's the list of all the peers trying to download the same torrent. -/// -/// The tracker keeps one entry like this for every torrent. -#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct TrackedTorrent { - /// A network of peers that are all trying to download the torrent. 
- swarm: Swarm, -} - -impl TrackedTorrent { - #[must_use] - pub fn new(swarm: Swarm) -> Self { - Self { swarm } - } - - #[must_use] - pub fn metadata(&self) -> SwarmMetadata { - self.swarm.metadata() - } - - #[must_use] - pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { - self.swarm.meets_retaining_policy(policy) - } - - #[must_use] - pub fn is_empty(&self) -> bool { - self.swarm.is_empty() - } - - #[must_use] - pub fn len(&self) -> usize { - self.swarm.len() - } - - #[must_use] - pub fn peers(&self, limit: Option) -> Vec> { - self.swarm.peers(limit) - } - - #[must_use] - pub fn peers_excluding(&self, client: &SocketAddr, limit: Option) -> Vec> { - self.swarm.peers_excluding(client, limit) - } - - pub fn handle_announcement(&mut self, peer: &peer::Peer) -> bool { - self.swarm.handle_announcement(peer) - } - - pub fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) { - self.swarm.remove_inactive(current_cutoff); - } -} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index d7042a1fd..12b205681 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -6,8 +6,8 @@ use std::sync::{Arc, Mutex, MutexGuard}; use torrust_tracker_clock::clock; pub type TorrentRepository = repository::TorrentRepository; -pub type TrackedTorrentHandle = Arc>; -pub type TrackedTorrent = entry::torrent::TrackedTorrent; +pub type TrackedTorrentHandle = Arc>; +pub type Swarm = entry::swarm::Swarm; /// Working version, for production. 
#[cfg(not(test))] @@ -20,11 +20,11 @@ pub(crate) type CurrentClock = clock::Working; pub(crate) type CurrentClock = clock::Stopped; pub trait LockTrackedTorrent { - fn lock_or_panic(&self) -> MutexGuard<'_, TrackedTorrent>; + fn lock_or_panic(&self) -> MutexGuard<'_, Swarm>; } -impl LockTrackedTorrent for Arc> { - fn lock_or_panic(&self) -> MutexGuard<'_, TrackedTorrent> { +impl LockTrackedTorrent for Arc> { + fn lock_or_panic(&self) -> MutexGuard<'_, Swarm> { self.lock().expect("can't acquire lock for tracked torrent handle") } } diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 1706937fc..2a5a38a3f 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -8,7 +8,6 @@ use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMe use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use crate::entry::swarm::Swarm; -use crate::entry::torrent::TrackedTorrent; use crate::{LockTrackedTorrent, TrackedTorrentHandle}; #[derive(Default, Debug)] @@ -51,7 +50,7 @@ impl TorrentRepository { tracing::debug!("Inserting new torrent: {:?}", info_hash); let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { - TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(number_of_downloads)).into()) + TrackedTorrentHandle::new(Swarm::new(number_of_downloads).into()) } else { TrackedTorrentHandle::default() }; @@ -229,7 +228,7 @@ impl TorrentRepository { continue; } - let entry = TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(*completed)).into()); + let entry = TrackedTorrentHandle::new(Swarm::new(*completed).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. 
diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index 242ffec70..e991cc7c9 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -8,7 +8,7 @@ use torrust_tracker_torrent_repository::{entry, LockTrackedTorrent, TrackedTorre #[derive(Debug, Clone)] pub(crate) enum Torrent { - Single(entry::torrent::TrackedTorrent), + Single(entry::swarm::Swarm), MutexStd(TrackedTorrentHandle), } diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 5f958f05c..ab1848ed1 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -17,7 +17,7 @@ use crate::CurrentClock; #[fixture] fn single() -> Torrent { - Torrent::Single(entry::torrent::TrackedTorrent::default()) + Torrent::Single(entry::swarm::Swarm::default()) } #[fixture] fn mutex_std() -> Torrent { diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 783606a40..3515a38cc 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -9,7 +9,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; -use torrust_tracker_torrent_repository::entry::torrent::TrackedTorrent; +use torrust_tracker_torrent_repository::entry::swarm::Swarm; use torrust_tracker_torrent_repository::{LockTrackedTorrent, TorrentRepository}; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -19,7 +19,7 @@ fn skip_list_mutex_std() -> TorrentRepository { TorrentRepository::default() } -type Entries = Vec<(InfoHash, TrackedTorrent)>; +type Entries = Vec<(InfoHash, Swarm)>; #[fixture] 
fn empty() -> Entries { @@ -28,26 +28,26 @@ fn empty() -> Entries { #[fixture] fn default() -> Entries { - vec![(InfoHash::default(), TrackedTorrent::default())] + vec![(InfoHash::default(), Swarm::default())] } #[fixture] fn started() -> Entries { - let mut torrent = TrackedTorrent::default(); + let mut torrent = Swarm::default(); torrent.handle_announcement(&a_started_peer(1)); vec![(InfoHash::default(), torrent)] } #[fixture] fn completed() -> Entries { - let mut torrent = TrackedTorrent::default(); + let mut torrent = Swarm::default(); torrent.handle_announcement(&a_completed_peer(2)); vec![(InfoHash::default(), torrent)] } #[fixture] fn downloaded() -> Entries { - let mut torrent = TrackedTorrent::default(); + let mut torrent = Swarm::default(); let mut peer = a_started_peer(3); torrent.handle_announcement(&peer); peer.event = AnnounceEvent::Completed; @@ -58,17 +58,17 @@ fn downloaded() -> Entries { #[fixture] fn three() -> Entries { - let mut started = TrackedTorrent::default(); + let mut started = Swarm::default(); let started_h = &mut DefaultHasher::default(); started.handle_announcement(&a_started_peer(1)); started.hash(started_h); - let mut completed = TrackedTorrent::default(); + let mut completed = Swarm::default(); let completed_h = &mut DefaultHasher::default(); completed.handle_announcement(&a_completed_peer(2)); completed.hash(completed_h); - let mut downloaded = TrackedTorrent::default(); + let mut downloaded = Swarm::default(); let downloaded_h = &mut DefaultHasher::default(); let mut downloaded_peer = a_started_peer(3); downloaded.handle_announcement(&downloaded_peer); @@ -86,10 +86,10 @@ fn three() -> Entries { #[fixture] fn many_out_of_order() -> Entries { - let mut entries: HashSet<(InfoHash, TrackedTorrent)> = HashSet::default(); + let mut entries: HashSet<(InfoHash, Swarm)> = HashSet::default(); for i in 0..408 { - let mut entry = TrackedTorrent::default(); + let mut entry = Swarm::default(); entry.handle_announcement(&a_started_peer(i)); 
entries.insert((InfoHash::from(&i), entry)); @@ -101,10 +101,10 @@ fn many_out_of_order() -> Entries { #[fixture] fn many_hashed_in_order() -> Entries { - let mut entries: BTreeMap = BTreeMap::default(); + let mut entries: BTreeMap = BTreeMap::default(); for i in 0..408 { - let mut entry = TrackedTorrent::default(); + let mut entry = Swarm::default(); entry.handle_announcement(&a_started_peer(i)); let hash: &mut DefaultHasher = &mut DefaultHasher::default(); @@ -269,7 +269,7 @@ async fn it_should_get_paginated( match paginated { // it should return empty if limit is zero. Pagination { limit: 0, .. } => { - let torrents: Vec<(InfoHash, TrackedTorrent)> = repo + let torrents: Vec<(InfoHash, Swarm)> = repo .get_paginated(Some(&paginated)) .iter() .map(|(i, lock_tracked_torrent)| (*i, lock_tracked_torrent.lock_or_panic().clone())) @@ -492,7 +492,7 @@ async fn it_should_remove_peerless_torrents( repo.remove_peerless_torrents(&policy); - let torrents: Vec<(InfoHash, TrackedTorrent)> = repo + let torrents: Vec<(InfoHash, Swarm)> = repo .get_paginated(None) .iter() .map(|(i, lock_tracked_torrent)| (*i, lock_tracked_torrent.lock_or_panic().clone())) From 030ae26bd27a8742c757badb992d807d9af7171b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 18:12:14 +0100 Subject: [PATCH 015/247] refactor: [#1495] reorganize torrent-repository mod --- packages/torrent-repository/src/entry/mod.rs | 1 - packages/torrent-repository/src/lib.rs | 4 ++-- packages/torrent-repository/src/repository.rs | 2 +- packages/torrent-repository/src/{entry => }/swarm.rs | 10 +++++----- packages/torrent-repository/tests/common/torrent.rs | 4 ++-- packages/torrent-repository/tests/entry/mod.rs | 4 ++-- packages/torrent-repository/tests/repository/mod.rs | 2 +- 7 files changed, 13 insertions(+), 14 deletions(-) delete mode 100644 packages/torrent-repository/src/entry/mod.rs rename packages/torrent-repository/src/{entry => }/swarm.rs (99%) diff --git 
a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs deleted file mode 100644 index 899c10d57..000000000 --- a/packages/torrent-repository/src/entry/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod swarm; diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 12b205681..3748cb171 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,5 +1,5 @@ -pub mod entry; pub mod repository; +pub mod swarm; use std::sync::{Arc, Mutex, MutexGuard}; @@ -7,7 +7,7 @@ use torrust_tracker_clock::clock; pub type TorrentRepository = repository::TorrentRepository; pub type TrackedTorrentHandle = Arc>; -pub type Swarm = entry::swarm::Swarm; +pub type Swarm = swarm::Swarm; /// Working version, for production. #[cfg(not(test))] diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 2a5a38a3f..2c1330c20 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -7,7 +7,7 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use crate::entry::swarm::Swarm; +use crate::swarm::Swarm; use crate::{LockTrackedTorrent, TrackedTorrentHandle}; #[derive(Default, Debug)] diff --git a/packages/torrent-repository/src/entry/swarm.rs b/packages/torrent-repository/src/swarm.rs similarity index 99% rename from packages/torrent-repository/src/entry/swarm.rs rename to packages/torrent-repository/src/swarm.rs index eb7aebfe4..78602f3d9 100644 --- a/packages/torrent-repository/src/entry/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -218,7 +218,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use 
torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::entry::swarm::Swarm; + use crate::swarm::Swarm; #[test] fn it_should_be_empty_when_no_peers_have_been_inserted() { @@ -483,7 +483,7 @@ mod tests { mod when_a_new_peer_is_added { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::entry::swarm::Swarm; + use crate::swarm::Swarm; #[test] fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { @@ -532,7 +532,7 @@ mod tests { mod when_a_peer_is_removed { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::entry::swarm::Swarm; + use crate::swarm::Swarm; #[test] fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { @@ -572,7 +572,7 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::entry::swarm::Swarm; + use crate::swarm::Swarm; #[test] fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { @@ -611,7 +611,7 @@ mod tests { use aquatic_udp_protocol::NumberOfBytes; use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::entry::swarm::Swarm; + use crate::swarm::Swarm; #[test] fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index e991cc7c9..197032cb4 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -4,11 +4,11 @@ use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; -use torrust_tracker_torrent_repository::{entry, LockTrackedTorrent, TrackedTorrentHandle}; +use torrust_tracker_torrent_repository::{swarm, LockTrackedTorrent, TrackedTorrentHandle}; #[derive(Debug, Clone)] pub(crate) enum Torrent { - 
Single(entry::swarm::Swarm), + Single(swarm::Swarm), MutexStd(TrackedTorrentHandle), } diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index ab1848ed1..9b16f8c4a 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -9,7 +9,7 @@ use torrust_tracker_clock::clock::{self, Time as _}; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_torrent_repository::{entry, TrackedTorrentHandle}; +use torrust_tracker_torrent_repository::{swarm, TrackedTorrentHandle}; use crate::common::torrent::Torrent; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -17,7 +17,7 @@ use crate::CurrentClock; #[fixture] fn single() -> Torrent { - Torrent::Single(entry::swarm::Swarm::default()) + Torrent::Single(swarm::Swarm::default()) } #[fixture] fn mutex_std() -> Torrent { diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 3515a38cc..1595db335 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -9,7 +9,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; -use torrust_tracker_torrent_repository::entry::swarm::Swarm; +use torrust_tracker_torrent_repository::swarm::Swarm; use torrust_tracker_torrent_repository::{LockTrackedTorrent, TorrentRepository}; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; From 78d4b83b4e3ab36bd9f8252768142b09f74c6786 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 18:13:09 +0100 Subject: [PATCH 016/247] refactor: [#1495] rename 
TrackedTorrentHandle to SwarmHandle --- packages/torrent-repository/src/lib.rs | 2 +- packages/torrent-repository/src/repository.rs | 20 +++++++++---------- .../tests/common/torrent.rs | 4 ++-- .../torrent-repository/tests/entry/mod.rs | 4 ++-- .../src/torrent/repository/in_memory.rs | 8 ++++---- 5 files changed, 19 insertions(+), 19 deletions(-) diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 3748cb171..76ef6c784 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -6,7 +6,7 @@ use std::sync::{Arc, Mutex, MutexGuard}; use torrust_tracker_clock::clock; pub type TorrentRepository = repository::TorrentRepository; -pub type TrackedTorrentHandle = Arc>; +pub type SwarmHandle = Arc>; pub type Swarm = swarm::Swarm; /// Working version, for production. diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 2c1330c20..fd30b4714 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -8,11 +8,11 @@ use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMe use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use crate::swarm::Swarm; -use crate::{LockTrackedTorrent, TrackedTorrentHandle}; +use crate::{LockTrackedTorrent, SwarmHandle}; #[derive(Default, Debug)] pub struct TorrentRepository { - pub torrents: SkipMap, + pub torrents: SkipMap, } impl TorrentRepository { @@ -50,9 +50,9 @@ impl TorrentRepository { tracing::debug!("Inserting new torrent: {:?}", info_hash); let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { - TrackedTorrentHandle::new(Swarm::new(number_of_downloads).into()) + SwarmHandle::new(Swarm::new(number_of_downloads).into()) } else { - TrackedTorrentHandle::default() + SwarmHandle::default() }; let inserted_entry = self.torrents.get_or_insert(*info_hash, 
new_entry); @@ -69,7 +69,7 @@ impl TorrentRepository { /// /// An `Option` containing the removed torrent entry if it existed. #[must_use] - pub fn remove(&self, key: &InfoHash) -> Option { + pub fn remove(&self, key: &InfoHash) -> Option { self.torrents.remove(key).map(|entry| entry.value().clone()) } @@ -93,7 +93,7 @@ impl TorrentRepository { /// /// An `Option` containing the tracked torrent handle if found. #[must_use] - pub fn get(&self, key: &InfoHash) -> Option { + pub fn get(&self, key: &InfoHash) -> Option { let maybe_entry = self.torrents.get(key); maybe_entry.map(|entry| entry.value().clone()) } @@ -108,7 +108,7 @@ impl TorrentRepository { /// /// A vector of `(InfoHash, TorrentEntry)` tuples. #[must_use] - pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, TrackedTorrentHandle)> { + pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, SwarmHandle)> { match pagination { Some(pagination) => self .torrents @@ -228,7 +228,7 @@ impl TorrentRepository { continue; } - let entry = TrackedTorrentHandle::new(Swarm::new(*completed).into()); + let entry = SwarmHandle::new(Swarm::new(*completed).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. @@ -541,7 +541,7 @@ mod tests { use crate::repository::TorrentRepository; use crate::tests::{sample_info_hash, sample_peer}; - use crate::{LockTrackedTorrent, TrackedTorrentHandle}; + use crate::{LockTrackedTorrent, SwarmHandle}; /// `TorrentEntry` data is not directly accessible. It's only /// accessible through the trait methods. 
We need this temporary @@ -554,7 +554,7 @@ mod tests { } #[allow(clippy::from_over_into)] - impl Into for TrackedTorrentHandle { + impl Into for SwarmHandle { fn into(self) -> TorrentEntryInfo { let torrent_guard = self.lock_or_panic(); diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index 197032cb4..a1899621f 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -4,12 +4,12 @@ use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; -use torrust_tracker_torrent_repository::{swarm, LockTrackedTorrent, TrackedTorrentHandle}; +use torrust_tracker_torrent_repository::{swarm, LockTrackedTorrent, SwarmHandle}; #[derive(Debug, Clone)] pub(crate) enum Torrent { Single(swarm::Swarm), - MutexStd(TrackedTorrentHandle), + MutexStd(SwarmHandle), } impl Torrent { diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 9b16f8c4a..4607fd9c7 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -9,7 +9,7 @@ use torrust_tracker_clock::clock::{self, Time as _}; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_torrent_repository::{swarm, TrackedTorrentHandle}; +use torrust_tracker_torrent_repository::{swarm, SwarmHandle}; use crate::common::torrent::Torrent; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -21,7 +21,7 @@ fn single() -> Torrent { } #[fixture] fn mutex_std() -> Torrent { - Torrent::MutexStd(TrackedTorrentHandle::default()) + Torrent::MutexStd(SwarmHandle::default()) } #[fixture] diff --git 
a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index e362b20c1..98d7eb682 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use torrust_tracker_torrent_repository::{TorrentRepository, TrackedTorrentHandle}; +use torrust_tracker_torrent_repository::{SwarmHandle, TorrentRepository}; /// In-memory repository for torrent entries. /// @@ -64,7 +64,7 @@ impl InMemoryTorrentRepository { /// An `Option` containing the removed torrent entry if it existed. #[cfg(test)] #[must_use] - pub(crate) fn remove(&self, key: &InfoHash) -> Option { + pub(crate) fn remove(&self, key: &InfoHash) -> Option { self.torrents.remove(key) } @@ -104,7 +104,7 @@ impl InMemoryTorrentRepository { /// /// An `Option` containing the torrent entry if found. #[must_use] - pub(crate) fn get(&self, key: &InfoHash) -> Option { + pub(crate) fn get(&self, key: &InfoHash) -> Option { self.torrents.get(key) } @@ -122,7 +122,7 @@ impl InMemoryTorrentRepository { /// /// A vector of `(InfoHash, TorrentEntry)` tuples. 
#[must_use] - pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, TrackedTorrentHandle)> { + pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, SwarmHandle)> { self.torrents.get_paginated(pagination) } From 0411a9a464554e039cbdd806c95b9bdd443ef155 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 18:16:05 +0100 Subject: [PATCH 017/247] refactor: [#1495] rename TorrentRepository to Swarms --- packages/torrent-repository/src/lib.rs | 4 +- .../src/{repository.rs => swarms.rs} | 108 +++++++++--------- .../tests/repository/mod.rs | 26 ++--- .../src/torrent/repository/in_memory.rs | 4 +- 4 files changed, 71 insertions(+), 71 deletions(-) rename packages/torrent-repository/src/{repository.rs => swarms.rs} (90%) diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 76ef6c784..f120afe88 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,11 +1,11 @@ -pub mod repository; +pub mod swarms; pub mod swarm; use std::sync::{Arc, Mutex, MutexGuard}; use torrust_tracker_clock::clock; -pub type TorrentRepository = repository::TorrentRepository; +pub type Swarms = swarms::Swarms; pub type SwarmHandle = Arc>; pub type Swarm = swarm::Swarm; diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/swarms.rs similarity index 90% rename from packages/torrent-repository/src/repository.rs rename to packages/torrent-repository/src/swarms.rs index fd30b4714..b5b891a2b 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -11,11 +11,11 @@ use crate::swarm::Swarm; use crate::{LockTrackedTorrent, SwarmHandle}; #[derive(Default, Debug)] -pub struct TorrentRepository { - pub torrents: SkipMap, +pub struct Swarms { + pub swarms: SkipMap, } -impl TorrentRepository { +impl Swarms { /// Upsert a peer into the swarm of a torrent. 
/// /// Optionally, it can also preset the number of downloads of the torrent @@ -42,7 +42,7 @@ impl TorrentRepository { peer: &peer::Peer, opt_persistent_torrent: Option, ) -> bool { - if let Some(existing_entry) = self.torrents.get(info_hash) { + if let Some(existing_entry) = self.swarms.get(info_hash) { tracing::debug!("Torrent already exists: {:?}", info_hash); existing_entry.value().lock_or_panic().handle_announcement(peer) @@ -55,7 +55,7 @@ impl TorrentRepository { SwarmHandle::default() }; - let inserted_entry = self.torrents.get_or_insert(*info_hash, new_entry); + let inserted_entry = self.swarms.get_or_insert(*info_hash, new_entry); let mut torrent_guard = inserted_entry.value().lock_or_panic(); @@ -70,7 +70,7 @@ impl TorrentRepository { /// An `Option` containing the removed torrent entry if it existed. #[must_use] pub fn remove(&self, key: &InfoHash) -> Option { - self.torrents.remove(key).map(|entry| entry.value().clone()) + self.swarms.remove(key).map(|entry| entry.value().clone()) } /// Removes inactive peers from all torrent entries. @@ -82,7 +82,7 @@ impl TorrentRepository { /// /// This function panics if the lock for the entry cannot be obtained. pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - for entry in &self.torrents { + for entry in &self.swarms { entry.value().lock_or_panic().remove_inactive(current_cutoff); } } @@ -94,7 +94,7 @@ impl TorrentRepository { /// An `Option` containing the tracked torrent handle if found. 
#[must_use] pub fn get(&self, key: &InfoHash) -> Option { - let maybe_entry = self.torrents.get(key); + let maybe_entry = self.swarms.get(key); maybe_entry.map(|entry| entry.value().clone()) } @@ -111,14 +111,14 @@ impl TorrentRepository { pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, SwarmHandle)> { match pagination { Some(pagination) => self - .torrents + .swarms .iter() .skip(pagination.offset as usize) .take(pagination.limit as usize) .map(|entry| (*entry.key(), entry.value().clone())) .collect(), None => self - .torrents + .swarms .iter() .map(|entry| (*entry.key(), entry.value().clone())) .collect(), @@ -136,7 +136,7 @@ impl TorrentRepository { /// This function panics if the lock for the entry cannot be obtained. #[must_use] pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.torrents + self.swarms .get(info_hash) .map(|entry| entry.value().lock_or_panic().metadata()) } @@ -208,7 +208,7 @@ impl TorrentRepository { /// /// This function panics if the lock for the entry cannot be obtained. pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - for entry in &self.torrents { + for entry in &self.swarms { if entry.value().lock_or_panic().meets_retaining_policy(policy) { continue; } @@ -224,7 +224,7 @@ impl TorrentRepository { /// access. pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { for (info_hash, completed) in persistent_torrents { - if self.torrents.contains_key(info_hash) { + if self.swarms.contains_key(info_hash) { continue; } @@ -232,7 +232,7 @@ impl TorrentRepository { // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. 
- self.torrents.get_or_insert(*info_hash, entry); + self.swarms.get_or_insert(*info_hash, entry); } } @@ -253,7 +253,7 @@ impl TorrentRepository { pub fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { let mut metrics = AggregateSwarmMetadata::default(); - for entry in &self.torrents { + for entry in &self.swarms { let stats = entry.value().lock_or_panic().metadata(); metrics.total_complete += u64::from(stats.complete); metrics.total_downloaded += u64::from(stats.downloaded); @@ -304,12 +304,12 @@ mod tests { use std::sync::Arc; - use crate::repository::TorrentRepository; + use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn it_should_add_the_first_peer_to_the_torrent_peer_list() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); @@ -320,7 +320,7 @@ mod tests { #[tokio::test] async fn it_should_allow_adding_the_same_peer_twice_to_the_torrent_peer_list() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); @@ -340,13 +340,13 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::repository::tests::the_in_memory_torrent_repository::numeric_peer_id; - use crate::repository::TorrentRepository; + use crate::swarms::tests::the_in_memory_torrent_repository::numeric_peer_id; + use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -360,7 +360,7 @@ mod tests { #[tokio::test] async fn 
it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let peers = torrent_repository.get_torrent_peers(&sample_info_hash(), 74); @@ -369,7 +369,7 @@ mod tests { #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); @@ -402,13 +402,13 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::repository::tests::the_in_memory_torrent_repository::numeric_peer_id; - use crate::repository::TorrentRepository; + use crate::swarms::tests::the_in_memory_torrent_repository::numeric_peer_id; + use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn it_should_return_an_empty_peer_list_for_a_non_existing_torrent() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let peers = torrent_repository.get_peers_for(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT); @@ -417,7 +417,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -431,7 +431,7 @@ mod tests { #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); @@ -471,12 +471,12 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; use 
torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::repository::TorrentRepository; + use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn it_should_remove_a_torrent_entry() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); @@ -488,7 +488,7 @@ mod tests { #[tokio::test] async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let mut peer = sample_peer(); @@ -502,8 +502,8 @@ mod tests { assert!(!torrent_repository.get_torrent_peers(&info_hash, 74).contains(&Arc::new(peer))); } - fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { - let torrent_repository = Arc::new(TorrentRepository::default()); + fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { + let torrent_repository = Arc::new(Swarms::default()); // Insert a sample peer for the torrent to force adding the torrent entry let mut peer = sample_peer(); @@ -539,7 +539,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::repository::TorrentRepository; + use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; use crate::{LockTrackedTorrent, SwarmHandle}; @@ -572,7 +572,7 @@ mod tests { #[tokio::test] async fn it_should_return_one_torrent_entry_by_infohash() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -600,13 +600,13 @@ mod tests { use 
torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::repository::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; - use crate::repository::TorrentRepository; + use crate::swarms::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn without_pagination() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -638,8 +638,8 @@ mod tests { use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::repository::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; - use crate::repository::TorrentRepository; + use crate::swarms::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::swarms::Swarms; use crate::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, sample_peer_one, sample_peer_two, @@ -647,7 +647,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_first_page() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); @@ -682,7 +682,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_second_page() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); @@ -717,7 +717,7 @@ mod tests { #[tokio::test] async fn it_should_allow_changing_the_page_size() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = 
Arc::new(Swarms::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); @@ -745,14 +745,14 @@ mod tests { use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - use crate::repository::TorrentRepository; + use crate::swarms::Swarms; use crate::tests::{complete_peer, leecher, sample_info_hash, seeder}; // todo: refactor to use test parametrization #[tokio::test] async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); @@ -769,7 +769,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &leecher(), None); @@ -788,7 +788,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &seeder(), None); @@ -807,7 +807,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer(), None); @@ -826,7 +826,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_are_multiple_torrents() 
{ - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let start_time = std::time::Instant::now(); for i in 0..1_000_000 { @@ -858,12 +858,12 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::repository::TorrentRepository; + use crate::swarms::Swarms; use crate::tests::{leecher, sample_info_hash}; #[tokio::test] async fn it_should_get_swarm_metadata_for_an_existing_torrent() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let infohash = sample_info_hash(); @@ -883,7 +883,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&sample_info_hash()); @@ -897,12 +897,12 @@ mod tests { use torrust_tracker_primitives::PersistentTorrents; - use crate::repository::TorrentRepository; + use crate::swarms::Swarms; use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_allow_importing_persisted_torrent_entries() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let infohash = sample_info_hash(); diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 1595db335..4c9053b7e 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -10,13 +10,13 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; use torrust_tracker_torrent_repository::swarm::Swarm; -use torrust_tracker_torrent_repository::{LockTrackedTorrent, 
TorrentRepository}; +use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; #[fixture] -fn skip_list_mutex_std() -> TorrentRepository { - TorrentRepository::default() +fn skip_list_mutex_std() -> Swarms { + Swarms::default() } type Entries = Vec<(InfoHash, Swarm)>; @@ -148,10 +148,10 @@ fn persistent_three() -> PersistentTorrents { t.iter().copied().collect() } -fn make(repo: &TorrentRepository, entries: &Entries) { +fn make(repo: &Swarms, entries: &Entries) { for (info_hash, entry) in entries { let new = Arc::new(Mutex::new(entry.clone())); - repo.torrents.insert(*info_hash, new); + repo.swarms.insert(*info_hash, new); } } @@ -200,7 +200,7 @@ fn policy_remove_persist() -> TrackerPolicy { #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_get_a_torrent_entry(#[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries) { +async fn it_should_get_a_torrent_entry(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { make(&repo, &entries); if let Some((info_hash, torrent)) = entries.first() { @@ -224,7 +224,7 @@ async fn it_should_get_a_torrent_entry(#[values(skip_list_mutex_std())] repo: To #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( - #[values(skip_list_mutex_std())] repo: TorrentRepository, + #[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries, many_out_of_order: Entries, ) { @@ -257,7 +257,7 @@ async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_paginated( - #[values(skip_list_mutex_std())] repo: TorrentRepository, + #[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries, #[values(paginated_limit_zero(), paginated_limit_one(), 
paginated_limit_one_offset_one())] paginated: Pagination, ) { @@ -312,7 +312,7 @@ async fn it_should_get_paginated( #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries) { +async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; make(&repo, &entries); @@ -342,7 +342,7 @@ async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: TorrentRep #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_import_persistent_torrents( - #[values(skip_list_mutex_std())] repo: TorrentRepository, + #[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries, #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, ) { @@ -370,7 +370,7 @@ async fn it_should_import_persistent_torrents( #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_remove_an_entry(#[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries) { +async fn it_should_remove_an_entry(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { make(&repo, &entries); for (info_hash, torrent) in entries { @@ -397,7 +397,7 @@ async fn it_should_remove_an_entry(#[values(skip_list_mutex_std())] repo: Torren #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries) { +async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { use std::ops::Sub as _; use std::time::Duration; @@ -484,7 +484,7 @@ async fn 
it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_remove_peerless_torrents( - #[values(skip_list_mutex_std())] repo: TorrentRepository, + #[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 98d7eb682..67e532e86 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use torrust_tracker_torrent_repository::{SwarmHandle, TorrentRepository}; +use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; /// In-memory repository for torrent entries. /// @@ -21,7 +21,7 @@ use torrust_tracker_torrent_repository::{SwarmHandle, TorrentRepository}; #[derive(Debug, Default)] pub struct InMemoryTorrentRepository { /// The underlying in-memory data structure that stores torrent entries. 
- torrents: Arc, + torrents: Arc, } impl InMemoryTorrentRepository { From 0f4596ef7de53e5806520cb7126e8234d28ab9ce Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 18:31:21 +0100 Subject: [PATCH 018/247] fix: [#1495] formatting --- packages/torrent-repository/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index f120afe88..c985f7a2b 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,5 +1,5 @@ -pub mod swarms; pub mod swarm; +pub mod swarms; use std::sync::{Arc, Mutex, MutexGuard}; From 34c159a161b7c167730f6c139dd3cb608173d37a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 18:48:48 +0100 Subject: [PATCH 019/247] refactor: [#1495] update method Swarm::meets_retaining_policy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changed from: ``` /// Returns true if the torrents meets the retention policy, meaning that /// it should be kept in the tracker. pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { if policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0 { return true; } if policy.remove_peerless_torrents && self.is_empty() { return false; } true } ``` To: ``` pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { !(policy.remove_peerless_torrents && self.is_empty()) } ``` I think the first condition was introduced to avoid losing the number of downloads when the torrent is removed because there are no peers. Now, we load that number from database when the torrent is added again after removing it from the tracker.
--- packages/torrent-repository/src/swarm.rs | 38 +++++++++++++++++------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 78602f3d9..1a17a2fb6 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -190,20 +190,11 @@ impl Swarm { self.peers.is_empty() } - /// Returns true if the torrents meets the retention policy, meaning that + /// Returns true if the swarm meets the retention policy, meaning that /// it should be kept in the tracker. #[must_use] pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { - // code-review: why? - if policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0 { - return true; - } - - if policy.remove_peerless_torrents && self.is_empty() { - return false; - } - - true + !(policy.remove_peerless_torrents && self.is_empty()) } } @@ -214,6 +205,7 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::PeerId; + use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; @@ -384,6 +376,30 @@ mod tests { assert_eq!(swarm.len(), 1); } + #[test] + fn it_should_be_kept_when_empty_if_the_tracker_policy_is_not_to_remove_peerless_torrents() { + let empty_swarm = Swarm::default(); + + let policy = TrackerPolicy { + remove_peerless_torrents: false, + ..Default::default() + }; + + assert!(empty_swarm.meets_retaining_policy(&policy)); + } + + #[test] + fn it_should_be_removed_when_empty_if_the_tracker_policy_is_to_remove_peerless_torrents() { + let empty_swarm = Swarm::default(); + + let policy = TrackerPolicy { + remove_peerless_torrents: true, + ..Default::default() + }; + + assert!(!empty_swarm.meets_retaining_policy(&policy)); + } + #[test] fn 
it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { let mut swarm = Swarm::default(); From 728de220693828e056b8f5069ddff19589b6825a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 18:55:41 +0100 Subject: [PATCH 020/247] docs: [#1495] add todo --- packages/torrent-repository/src/swarms.rs | 1 + packages/torrent-repository/tests/repository/mod.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index b5b891a2b..936f49d22 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -12,6 +12,7 @@ use crate::{LockTrackedTorrent, SwarmHandle}; #[derive(Default, Debug)] pub struct Swarms { + // todo: this needs to be public only to insert a peerless torrent (empty swarm). pub swarms: SkipMap, } diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 4c9053b7e..071a187fa 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -151,6 +151,7 @@ fn persistent_three() -> PersistentTorrents { fn make(repo: &Swarms, entries: &Entries) { for (info_hash, entry) in entries { let new = Arc::new(Mutex::new(entry.clone())); + // todo: use a public method to insert an empty swarm. repo.swarms.insert(*info_hash, new); } } From 6f5cb279083ee3b8b47f849e111019dfdea9c3b3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 10:03:18 +0100 Subject: [PATCH 021/247] refactor: [#1495] remove test for SwarmHandle Integration tests will be removed because unit tests have been added. Besides, there is no point in testing only the wrapper. SwarmHandle is only a wrapper over Swarm. 
--- .../torrent-repository/tests/common/mod.rs | 1 - .../tests/common/torrent.rs | 71 ---------- .../torrent-repository/tests/entry/mod.rs | 127 ++++++++---------- 3 files changed, 55 insertions(+), 144 deletions(-) delete mode 100644 packages/torrent-repository/tests/common/torrent.rs diff --git a/packages/torrent-repository/tests/common/mod.rs b/packages/torrent-repository/tests/common/mod.rs index e083a05cc..c77ca2769 100644 --- a/packages/torrent-repository/tests/common/mod.rs +++ b/packages/torrent-repository/tests/common/mod.rs @@ -1,2 +1 @@ -pub mod torrent; pub mod torrent_peer_builder; diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs deleted file mode 100644 index a1899621f..000000000 --- a/packages/torrent-repository/tests/common/torrent.rs +++ /dev/null @@ -1,71 +0,0 @@ -use std::net::SocketAddr; -use std::sync::Arc; - -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; -use torrust_tracker_torrent_repository::{swarm, LockTrackedTorrent, SwarmHandle}; - -#[derive(Debug, Clone)] -pub(crate) enum Torrent { - Single(swarm::Swarm), - MutexStd(SwarmHandle), -} - -impl Torrent { - pub(crate) fn get_stats(&self) -> SwarmMetadata { - match self { - Torrent::Single(entry) => entry.metadata(), - Torrent::MutexStd(entry) => entry.lock_or_panic().metadata(), - } - } - - pub(crate) fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { - match self { - Torrent::Single(entry) => entry.meets_retaining_policy(policy), - Torrent::MutexStd(entry) => entry.lock_or_panic().meets_retaining_policy(policy), - } - } - - pub(crate) fn peers_is_empty(&self) -> bool { - match self { - Torrent::Single(entry) => entry.is_empty(), - Torrent::MutexStd(entry) => entry.lock_or_panic().is_empty(), - } - } - - pub(crate) fn get_peers_len(&self) -> usize { - match self { - 
Torrent::Single(entry) => entry.len(), - Torrent::MutexStd(entry) => entry.lock_or_panic().len(), - } - } - - pub(crate) fn get_peers(&self, limit: Option) -> Vec> { - match self { - Torrent::Single(entry) => entry.peers(limit), - Torrent::MutexStd(entry) => entry.lock_or_panic().peers(limit), - } - } - - pub(crate) fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { - match self { - Torrent::Single(entry) => entry.peers_excluding(client, limit), - Torrent::MutexStd(entry) => entry.lock_or_panic().peers_excluding(client, limit), - } - } - - pub(crate) fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { - match self { - Torrent::Single(entry) => entry.handle_announcement(peer), - Torrent::MutexStd(entry) => entry.lock_or_panic().handle_announcement(peer), - } - } - - pub(crate) fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { - match self { - Torrent::Single(entry) => entry.remove_inactive(current_cutoff), - Torrent::MutexStd(entry) => entry.lock_or_panic().remove_inactive(current_cutoff), - } - } -} diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 4607fd9c7..491b77a90 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -9,19 +9,14 @@ use torrust_tracker_clock::clock::{self, Time as _}; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_torrent_repository::{swarm, SwarmHandle}; +use torrust_tracker_torrent_repository::Swarm; -use crate::common::torrent::Torrent; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; use crate::CurrentClock; #[fixture] -fn single() -> Torrent { - Torrent::Single(swarm::Swarm::default()) -} -#[fixture] -fn mutex_std() -> Torrent { - Torrent::MutexStd(SwarmHandle::default()) +fn single() -> Swarm { + 
Swarm::default() } #[fixture] @@ -52,39 +47,39 @@ pub enum Makes { Three, } -fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { +fn make(torrent: &mut Swarm, makes: &Makes) -> Vec { match makes { Makes::Empty => vec![], Makes::Started => { let peer = a_started_peer(1); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); vec![peer] } Makes::Completed => { let peer = a_completed_peer(2); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); vec![peer] } Makes::Downloaded => { let mut peer = a_started_peer(3); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes::new(0); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); vec![peer] } Makes::Three => { let peer_1 = a_started_peer(1); - torrent.upsert_peer(&peer_1); + torrent.handle_announcement(&peer_1); let peer_2 = a_completed_peer(2); - torrent.upsert_peer(&peer_2); + torrent.handle_announcement(&peer_2); let mut peer_3 = a_started_peer(3); - torrent.upsert_peer(&peer_3); + torrent.handle_announcement(&peer_3); peer_3.event = AnnounceEvent::Completed; peer_3.left = NumberOfBytes::new(0); - torrent.upsert_peer(&peer_3); + torrent.handle_announcement(&peer_3); vec![peer_1, peer_2, peer_3] } } @@ -93,10 +88,10 @@ fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { #[rstest] #[case::empty(&Makes::Empty)] #[tokio::test] -async fn it_should_be_empty_by_default(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { +async fn it_should_be_empty_by_default(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { make(&mut torrent, makes); - assert_eq!(torrent.get_peers_len(), 0); + assert_eq!(torrent.len(), 0); } #[rstest] @@ -107,14 +102,14 @@ async fn it_should_be_empty_by_default(#[values(single(), mutex_std())] mut torr #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy( - 
#[values(single(), mutex_std())] mut torrent: Torrent, + #[values(single())] mut torrent: Swarm, #[case] makes: &Makes, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { make(&mut torrent, makes); - let has_peers = !torrent.peers_is_empty(); - let has_downloads = torrent.get_stats().downloaded != 0; + let has_peers = !torrent.is_empty(); + let has_downloads = torrent.metadata().downloaded != 0; match (policy.remove_peerless_torrents, policy.persistent_torrent_completed_stat) { // remove torrents without peers, and keep completed download stats @@ -144,10 +139,10 @@ async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_get_peers_for_torrent_entry(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { +async fn it_should_get_peers_for_torrent_entry(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { let peers = make(&mut torrent, makes); - let torrent_peers = torrent.get_peers(None); + let torrent_peers = torrent.peers(None); assert_eq!(torrent_peers.len(), peers.len()); @@ -163,15 +158,15 @@ async fn it_should_get_peers_for_torrent_entry(#[values(single(), mutex_std())] #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { +async fn it_should_update_a_peer(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { make(&mut torrent, makes); // Make and insert a new peer. let mut peer = a_started_peer(-1); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); // Get the Inserted Peer by Id. 
- let peers = torrent.get_peers(None); + let peers = torrent.peers(None); let original = peers .iter() .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) @@ -181,10 +176,10 @@ async fn it_should_update_a_peer(#[values(single(), mutex_std())] mut torrent: T // Announce "Completed" torrent download event. peer.event = AnnounceEvent::Completed; - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); // Get the Updated Peer by Id. - let peers = torrent.get_peers(None); + let peers = torrent.peers(None); let updated = peers .iter() .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) @@ -200,20 +195,17 @@ async fn it_should_update_a_peer(#[values(single(), mutex_std())] mut torrent: T #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_remove_a_peer_upon_stopped_announcement( - #[values(single(), mutex_std())] mut torrent: Torrent, - #[case] makes: &Makes, -) { +async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { use torrust_tracker_primitives::peer::ReadInfo as _; make(&mut torrent, makes); let mut peer = a_started_peer(-1); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); // The started peer should be inserted. - let peers = torrent.get_peers(None); + let peers = torrent.peers(None); let original = peers .iter() .find(|p| p.get_id() == peer.get_id()) @@ -223,10 +215,10 @@ async fn it_should_remove_a_peer_upon_stopped_announcement( // Change peer to "Stopped" and insert. peer.event = AnnounceEvent::Stopped; - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); // It should be removed now. 
- let peers = torrent.get_peers(None); + let peers = torrent.peers(None); assert_eq!( peers.iter().find(|p| p.get_id() == peer.get_id()), @@ -242,13 +234,13 @@ async fn it_should_remove_a_peer_upon_stopped_announcement( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic( - #[values(single(), mutex_std())] mut torrent: Torrent, + #[values(single())] mut torrent: Swarm, #[case] makes: &Makes, ) { make(&mut torrent, makes); - let downloaded = torrent.get_stats().downloaded; + let downloaded = torrent.metadata().downloaded; - let peers = torrent.get_peers(None); + let peers = torrent.peers(None); let mut peer = **peers.first().expect("there should be a peer"); let is_already_completed = peer.event == AnnounceEvent::Completed; @@ -256,8 +248,8 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade // Announce "Completed" torrent download event. peer.event = AnnounceEvent::Completed; - torrent.upsert_peer(&peer); - let stats = torrent.get_stats(); + torrent.handle_announcement(&peer); + let stats = torrent.metadata(); if is_already_completed { assert_eq!(stats.downloaded, downloaded); @@ -272,19 +264,19 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer_as_a_seeder(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { +async fn it_should_update_a_peer_as_a_seeder(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { let peers = make(&mut torrent, makes); let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); - let peers = torrent.get_peers(None); + let peers = torrent.peers(None); let mut peer = **peers.first().expect("there should be a peer"); let is_already_non_left = peer.left == NumberOfBytes::new(0); // Set 
Bytes Left to Zero peer.left = NumberOfBytes::new(0); - torrent.upsert_peer(&peer); - let stats = torrent.get_stats(); + torrent.handle_announcement(&peer); + let stats = torrent.metadata(); if is_already_non_left { // it was already complete @@ -301,19 +293,19 @@ async fn it_should_update_a_peer_as_a_seeder(#[values(single(), mutex_std())] mu #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer_as_incomplete(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { +async fn it_should_update_a_peer_as_incomplete(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { let peers = make(&mut torrent, makes); let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); - let peers = torrent.get_peers(None); + let peers = torrent.peers(None); let mut peer = **peers.first().expect("there should be a peer"); let completed_already = peer.left == NumberOfBytes::new(0); // Set Bytes Left to no Zero peer.left = NumberOfBytes::new(1); - torrent.upsert_peer(&peer); - let stats = torrent.get_stats(); + torrent.handle_announcement(&peer); + let stats = torrent.metadata(); if completed_already { // now it is incomplete @@ -330,13 +322,10 @@ async fn it_should_update_a_peer_as_incomplete(#[values(single(), mutex_std())] #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_get_peers_excluding_the_client_socket( - #[values(single(), mutex_std())] mut torrent: Torrent, - #[case] makes: &Makes, -) { +async fn it_should_get_peers_excluding_the_client_socket(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { make(&mut torrent, makes); - let peers = torrent.get_peers(None); + let peers = torrent.peers(None); let mut peer = **peers.first().expect("there should be a peer"); let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); @@ -345,14 +334,14 @@ 
async fn it_should_get_peers_excluding_the_client_socket( assert_ne!(peer.peer_addr, socket); // it should get the peer as it dose not share the socket. - assert!(torrent.get_peers_for_client(&socket, None).contains(&peer.into())); + assert!(torrent.peers_excluding(&socket, None).contains(&peer.into())); // set the address to the socket. peer.peer_addr = socket; - torrent.upsert_peer(&peer); // Add peer + torrent.handle_announcement(&peer); // Add peer // It should not include the peer that has the same socket. - assert!(!torrent.get_peers_for_client(&socket, None).contains(&peer.into())); + assert!(!torrent.peers_excluding(&socket, None).contains(&peer.into())); } #[rstest] @@ -362,19 +351,16 @@ async fn it_should_get_peers_excluding_the_client_socket( #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_limit_the_number_of_peers_returned( - #[values(single(), mutex_std())] mut torrent: Torrent, - #[case] makes: &Makes, -) { +async fn it_should_limit_the_number_of_peers_returned(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { make(&mut torrent, makes); // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { let peer = a_started_peer(peer_number); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); } - let peers = torrent.get_peers(Some(TORRENT_PEERS_LIMIT)); + let peers = torrent.peers(Some(TORRENT_PEERS_LIMIT)); assert_eq!(peers.len(), 74); } @@ -386,10 +372,7 @@ async fn it_should_limit_the_number_of_peers_returned( #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_remove_inactive_peers_beyond_cutoff( - #[values(single(), mutex_std())] mut torrent: Torrent, - #[case] makes: &Makes, -) { +async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { const TIMEOUT: Duration = Duration::from_secs(120); const EXPIRE: Duration = 
Duration::from_secs(121); @@ -402,12 +385,12 @@ async fn it_should_remove_inactive_peers_beyond_cutoff( peer.updated = now.sub(EXPIRE); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); - assert_eq!(torrent.get_peers_len(), peers.len() + 1); + assert_eq!(torrent.len(), peers.len() + 1); let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); - torrent.remove_inactive_peers(current_cutoff); + torrent.remove_inactive(current_cutoff); - assert_eq!(torrent.get_peers_len(), peers.len()); + assert_eq!(torrent.len(), peers.len()); } From 5413e597b7054a4ea7f32a4f36ce9b801c78e832 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 10:15:40 +0100 Subject: [PATCH 022/247] refactor: [#1495] renamings to follow latest changes in torrent-repository pkg --- .../torrent-repository/tests/integration.rs | 4 +- .../tests/{entry => swarm}/mod.rs | 128 +++++++++--------- .../tests/{repository => swarms}/mod.rs | 99 +++++++------- .../src/torrent/repository/in_memory.rs | 26 ++-- 4 files changed, 130 insertions(+), 127 deletions(-) rename packages/torrent-repository/tests/{entry => swarm}/mod.rs (73%) rename packages/torrent-repository/tests/{repository => swarms}/mod.rs (81%) diff --git a/packages/torrent-repository/tests/integration.rs b/packages/torrent-repository/tests/integration.rs index 5aab67b03..b3e057075 100644 --- a/packages/torrent-repository/tests/integration.rs +++ b/packages/torrent-repository/tests/integration.rs @@ -7,8 +7,8 @@ use torrust_tracker_clock::clock; pub mod common; -mod entry; -mod repository; +mod swarm; +mod swarms; /// This code needs to be copied into each crate. /// Working version, for production. 
diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/swarm/mod.rs similarity index 73% rename from packages/torrent-repository/tests/entry/mod.rs rename to packages/torrent-repository/tests/swarm/mod.rs index 491b77a90..d529b0243 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/swarm/mod.rs @@ -15,7 +15,7 @@ use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; use crate::CurrentClock; #[fixture] -fn single() -> Swarm { +fn swarm() -> Swarm { Swarm::default() } @@ -47,39 +47,39 @@ pub enum Makes { Three, } -fn make(torrent: &mut Swarm, makes: &Makes) -> Vec { +fn make(swarm: &mut Swarm, makes: &Makes) -> Vec { match makes { Makes::Empty => vec![], Makes::Started => { let peer = a_started_peer(1); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); vec![peer] } Makes::Completed => { let peer = a_completed_peer(2); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); vec![peer] } Makes::Downloaded => { let mut peer = a_started_peer(3); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes::new(0); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); vec![peer] } Makes::Three => { let peer_1 = a_started_peer(1); - torrent.handle_announcement(&peer_1); + swarm.handle_announcement(&peer_1); let peer_2 = a_completed_peer(2); - torrent.handle_announcement(&peer_2); + swarm.handle_announcement(&peer_2); let mut peer_3 = a_started_peer(3); - torrent.handle_announcement(&peer_3); + swarm.handle_announcement(&peer_3); peer_3.event = AnnounceEvent::Completed; peer_3.left = NumberOfBytes::new(0); - torrent.handle_announcement(&peer_3); + swarm.handle_announcement(&peer_3); vec![peer_1, peer_2, peer_3] } } @@ -88,10 +88,10 @@ fn make(torrent: &mut Swarm, makes: &Makes) -> Vec { #[rstest] #[case::empty(&Makes::Empty)] 
#[tokio::test] -async fn it_should_be_empty_by_default(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { - make(&mut torrent, makes); +async fn it_should_be_empty_by_default(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { + make(&mut swarm, makes); - assert_eq!(torrent.len(), 0); + assert_eq!(swarm.len(), 0); } #[rstest] @@ -102,33 +102,33 @@ async fn it_should_be_empty_by_default(#[values(single())] mut torrent: Swarm, # #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy( - #[values(single())] mut torrent: Swarm, + #[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { - make(&mut torrent, makes); + make(&mut swarm, makes); - let has_peers = !torrent.is_empty(); - let has_downloads = torrent.metadata().downloaded != 0; + let has_peers = !swarm.is_empty(); + let has_downloads = swarm.metadata().downloaded != 0; match (policy.remove_peerless_torrents, policy.persistent_torrent_completed_stat) { // remove torrents without peers, and keep completed download stats (true, true) => match (has_peers, has_downloads) { // no peers, but has downloads // peers, with or without downloads - (false, true) | (true, true | false) => assert!(torrent.meets_retaining_policy(&policy)), + (false, true) | (true, true | false) => assert!(swarm.meets_retaining_policy(&policy)), // no peers and no downloads - (false, false) => assert!(!torrent.meets_retaining_policy(&policy)), + (false, false) => assert!(!swarm.meets_retaining_policy(&policy)), }, // remove torrents without peers and drop completed download stats (true, false) => match (has_peers, has_downloads) { // peers, with or without downloads - (true, true | false) => assert!(torrent.meets_retaining_policy(&policy)), + (true, true | false) => assert!(swarm.meets_retaining_policy(&policy)), // no peers and with or 
without downloads - (false, true | false) => assert!(!torrent.meets_retaining_policy(&policy)), + (false, true | false) => assert!(!swarm.meets_retaining_policy(&policy)), }, // keep torrents without peers, but keep or drop completed download stats - (false, true | false) => assert!(torrent.meets_retaining_policy(&policy)), + (false, true | false) => assert!(swarm.meets_retaining_policy(&policy)), } } @@ -139,10 +139,10 @@ async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_get_peers_for_torrent_entry(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { - let peers = make(&mut torrent, makes); +async fn it_should_get_peers_for_torrent_entry(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { + let peers = make(&mut swarm, makes); - let torrent_peers = torrent.peers(None); + let torrent_peers = swarm.peers(None); assert_eq!(torrent_peers.len(), peers.len()); @@ -158,15 +158,15 @@ async fn it_should_get_peers_for_torrent_entry(#[values(single())] mut torrent: #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { - make(&mut torrent, makes); +async fn it_should_update_a_peer(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { + make(&mut swarm, makes); // Make and insert a new peer. let mut peer = a_started_peer(-1); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); // Get the Inserted Peer by Id. - let peers = torrent.peers(None); + let peers = swarm.peers(None); let original = peers .iter() .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) @@ -176,10 +176,10 @@ async fn it_should_update_a_peer(#[values(single())] mut torrent: Swarm, #[case] // Announce "Completed" torrent download event. 
peer.event = AnnounceEvent::Completed; - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); // Get the Updated Peer by Id. - let peers = torrent.peers(None); + let peers = swarm.peers(None); let updated = peers .iter() .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) @@ -195,17 +195,17 @@ async fn it_should_update_a_peer(#[values(single())] mut torrent: Swarm, #[case] #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { +async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { use torrust_tracker_primitives::peer::ReadInfo as _; - make(&mut torrent, makes); + make(&mut swarm, makes); let mut peer = a_started_peer(-1); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); // The started peer should be inserted. - let peers = torrent.peers(None); + let peers = swarm.peers(None); let original = peers .iter() .find(|p| p.get_id() == peer.get_id()) @@ -215,10 +215,10 @@ async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(single())] m // Change peer to "Stopped" and insert. peer.event = AnnounceEvent::Stopped; - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); // It should be removed now. 
- let peers = torrent.peers(None); + let peers = swarm.peers(None); assert_eq!( peers.iter().find(|p| p.get_id() == peer.get_id()), @@ -234,7 +234,7 @@ async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(single())] m #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic( - #[values(single())] mut torrent: Swarm, + #[values(swarm())] mut torrent: Swarm, #[case] makes: &Makes, ) { make(&mut torrent, makes); @@ -264,19 +264,19 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer_as_a_seeder(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { - let peers = make(&mut torrent, makes); +async fn it_should_update_a_peer_as_a_seeder(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { + let peers = make(&mut swarm, makes); let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); - let peers = torrent.peers(None); + let peers = swarm.peers(None); let mut peer = **peers.first().expect("there should be a peer"); let is_already_non_left = peer.left == NumberOfBytes::new(0); // Set Bytes Left to Zero peer.left = NumberOfBytes::new(0); - torrent.handle_announcement(&peer); - let stats = torrent.metadata(); + swarm.handle_announcement(&peer); + let stats = swarm.metadata(); if is_already_non_left { // it was already complete @@ -293,19 +293,19 @@ async fn it_should_update_a_peer_as_a_seeder(#[values(single())] mut torrent: Sw #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer_as_incomplete(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { - let peers = make(&mut torrent, makes); +async fn it_should_update_a_peer_as_incomplete(#[values(swarm())] mut swarm: Swarm, #[case] makes: 
&Makes) { + let peers = make(&mut swarm, makes); let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); - let peers = torrent.peers(None); + let peers = swarm.peers(None); let mut peer = **peers.first().expect("there should be a peer"); let completed_already = peer.left == NumberOfBytes::new(0); // Set Bytes Left to no Zero peer.left = NumberOfBytes::new(1); - torrent.handle_announcement(&peer); - let stats = torrent.metadata(); + swarm.handle_announcement(&peer); + let stats = swarm.metadata(); if completed_already { // now it is incomplete @@ -322,10 +322,10 @@ async fn it_should_update_a_peer_as_incomplete(#[values(single())] mut torrent: #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_get_peers_excluding_the_client_socket(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { - make(&mut torrent, makes); +async fn it_should_get_peers_excluding_the_client_socket(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { + make(&mut swarm, makes); - let peers = torrent.peers(None); + let peers = swarm.peers(None); let mut peer = **peers.first().expect("there should be a peer"); let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); @@ -334,14 +334,14 @@ async fn it_should_get_peers_excluding_the_client_socket(#[values(single())] mut assert_ne!(peer.peer_addr, socket); // it should get the peer as it dose not share the socket. - assert!(torrent.peers_excluding(&socket, None).contains(&peer.into())); + assert!(swarm.peers_excluding(&socket, None).contains(&peer.into())); // set the address to the socket. peer.peer_addr = socket; - torrent.handle_announcement(&peer); // Add peer + swarm.handle_announcement(&peer); // Add peer // It should not include the peer that has the same socket. 
- assert!(!torrent.peers_excluding(&socket, None).contains(&peer.into())); + assert!(!swarm.peers_excluding(&socket, None).contains(&peer.into())); } #[rstest] @@ -351,16 +351,16 @@ async fn it_should_get_peers_excluding_the_client_socket(#[values(single())] mut #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_limit_the_number_of_peers_returned(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { - make(&mut torrent, makes); +async fn it_should_limit_the_number_of_peers_returned(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { + make(&mut swarm, makes); // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { let peer = a_started_peer(peer_number); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); } - let peers = torrent.peers(Some(TORRENT_PEERS_LIMIT)); + let peers = swarm.peers(Some(TORRENT_PEERS_LIMIT)); assert_eq!(peers.len(), 74); } @@ -372,11 +372,11 @@ async fn it_should_limit_the_number_of_peers_returned(#[values(single())] mut to #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { +async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { const TIMEOUT: Duration = Duration::from_secs(120); const EXPIRE: Duration = Duration::from_secs(121); - let peers = make(&mut torrent, makes); + let peers = make(&mut swarm, makes); let mut peer = a_completed_peer(-1); @@ -385,12 +385,12 @@ async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(single())] mut t peer.updated = now.sub(EXPIRE); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); - assert_eq!(torrent.len(), peers.len() + 1); + assert_eq!(swarm.len(), peers.len() + 1); let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); - 
torrent.remove_inactive(current_cutoff); + swarm.remove_inactive(current_cutoff); - assert_eq!(torrent.len(), peers.len()); + assert_eq!(swarm.len(), peers.len()); } diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs similarity index 81% rename from packages/torrent-repository/tests/repository/mod.rs rename to packages/torrent-repository/tests/swarms/mod.rs index 071a187fa..20c6255fa 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -15,7 +15,7 @@ use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; #[fixture] -fn skip_list_mutex_std() -> Swarms { +fn swarms() -> Swarms { Swarms::default() } @@ -33,27 +33,27 @@ fn default() -> Entries { #[fixture] fn started() -> Entries { - let mut torrent = Swarm::default(); - torrent.handle_announcement(&a_started_peer(1)); - vec![(InfoHash::default(), torrent)] + let mut swarm = Swarm::default(); + swarm.handle_announcement(&a_started_peer(1)); + vec![(InfoHash::default(), swarm)] } #[fixture] fn completed() -> Entries { - let mut torrent = Swarm::default(); - torrent.handle_announcement(&a_completed_peer(2)); - vec![(InfoHash::default(), torrent)] + let mut swarm = Swarm::default(); + swarm.handle_announcement(&a_completed_peer(2)); + vec![(InfoHash::default(), swarm)] } #[fixture] fn downloaded() -> Entries { - let mut torrent = Swarm::default(); + let mut swarm = Swarm::default(); let mut peer = a_started_peer(3); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes::new(0); - torrent.handle_announcement(&peer); - vec![(InfoHash::default(), torrent)] + swarm.handle_announcement(&peer); + vec![(InfoHash::default(), swarm)] } #[fixture] @@ -201,13 +201,13 @@ fn policy_remove_persist() -> TrackerPolicy { 
#[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_get_a_torrent_entry(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { +async fn it_should_get_a_torrent_entry(#[values(swarms())] repo: Swarms, #[case] entries: Entries) { make(&repo, &entries); - if let Some((info_hash, torrent)) = entries.first() { + if let Some((info_hash, swarm)) = entries.first() { assert_eq!( Some(repo.get(info_hash).unwrap().lock_or_panic().clone()), - Some(torrent.clone()) + Some(swarm.clone()) ); } else { assert!(repo.get(&InfoHash::default()).is_none()); @@ -225,7 +225,7 @@ async fn it_should_get_a_torrent_entry(#[values(skip_list_mutex_std())] repo: Sw #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( - #[values(skip_list_mutex_std())] repo: Swarms, + #[values(swarms())] repo: Swarms, #[case] entries: Entries, many_out_of_order: Entries, ) { @@ -258,7 +258,7 @@ async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_paginated( - #[values(skip_list_mutex_std())] repo: Swarms, + #[values(swarms())] repo: Swarms, #[case] entries: Entries, #[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination, ) { @@ -270,13 +270,13 @@ async fn it_should_get_paginated( match paginated { // it should return empty if limit is zero. Pagination { limit: 0, .. } => { - let torrents: Vec<(InfoHash, Swarm)> = repo + let swarms: Vec<(InfoHash, Swarm)> = repo .get_paginated(Some(&paginated)) .iter() - .map(|(i, lock_tracked_torrent)| (*i, lock_tracked_torrent.lock_or_panic().clone())) + .map(|(i, swarm_handle)| (*i, swarm_handle.lock_or_panic().clone())) .collect(); - assert_eq!(torrents, vec![]); + assert_eq!(swarms, vec![]); } // it should return a single entry if the limit is one. 
@@ -313,10 +313,10 @@ async fn it_should_get_paginated( #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { +async fn it_should_get_metrics(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - make(&repo, &entries); + make(&swarms, &entries); let mut metrics = AggregateSwarmMetadata::default(); @@ -329,7 +329,7 @@ async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: Swarms, #[ metrics.total_downloaded += u64::from(stats.downloaded); } - assert_eq!(repo.get_aggregate_swarm_metadata(), metrics); + assert_eq!(swarms.get_aggregate_swarm_metadata(), metrics); } #[rstest] @@ -343,21 +343,21 @@ async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: Swarms, #[ #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_import_persistent_torrents( - #[values(skip_list_mutex_std())] repo: Swarms, + #[values(swarms())] swarms: Swarms, #[case] entries: Entries, #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, ) { - make(&repo, &entries); + make(&swarms, &entries); - let mut downloaded = repo.get_aggregate_swarm_metadata().total_downloaded; + let mut downloaded = swarms.get_aggregate_swarm_metadata().total_downloaded; persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); - repo.import_persistent(&persistent_torrents); + swarms.import_persistent(&persistent_torrents); - assert_eq!(repo.get_aggregate_swarm_metadata().total_downloaded, downloaded); + assert_eq!(swarms.get_aggregate_swarm_metadata().total_downloaded, downloaded); for (entry, _) in persistent_torrents { - assert!(repo.get(&entry).is_some()); + assert!(swarms.get(&entry).is_some()); } } @@ -371,21 +371,24 @@ async fn 
it_should_import_persistent_torrents( #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_remove_an_entry(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { - make(&repo, &entries); +async fn it_should_remove_an_entry(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { + make(&swarms, &entries); for (info_hash, torrent) in entries { assert_eq!( - Some(repo.get(&info_hash).unwrap().lock_or_panic().clone()), + Some(swarms.get(&info_hash).unwrap().lock_or_panic().clone()), Some(torrent.clone()) ); - assert_eq!(Some(repo.remove(&info_hash).unwrap().lock_or_panic().clone()), Some(torrent)); + assert_eq!( + Some(swarms.remove(&info_hash).unwrap().lock_or_panic().clone()), + Some(torrent) + ); - assert!(repo.get(&info_hash).is_none()); - assert!(repo.remove(&info_hash).is_none()); + assert!(swarms.get(&info_hash).is_none()); + assert!(swarms.remove(&info_hash).is_none()); } - assert_eq!(repo.get_aggregate_swarm_metadata().total_torrents, 0); + assert_eq!(swarms.get_aggregate_swarm_metadata().total_torrents, 0); } #[rstest] @@ -398,7 +401,7 @@ async fn it_should_remove_an_entry(#[values(skip_list_mutex_std())] repo: Swarms #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { +async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { use std::ops::Sub as _; use std::time::Duration; @@ -411,7 +414,7 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: const TIMEOUT: Duration = Duration::from_secs(120); const EXPIRE: Duration = Duration::from_secs(121); - make(&repo, &entries); + make(&swarms, &entries); let info_hash: InfoHash; let mut peer: peer::Peer; @@ -435,15 +438,15 @@ async fn 
it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: // Insert the infohash and peer into the repository // and verify there is an extra torrent entry. { - repo.upsert_peer(&info_hash, &peer, None); - assert_eq!(repo.get_aggregate_swarm_metadata().total_torrents, entries.len() as u64 + 1); + swarms.upsert_peer(&info_hash, &peer, None); + assert_eq!(swarms.get_aggregate_swarm_metadata().total_torrents, entries.len() as u64 + 1); } // Insert the infohash and peer into the repository // and verify the swarm metadata was updated. { - repo.upsert_peer(&info_hash, &peer, None); - let stats = repo.get_swarm_metadata(&info_hash); + swarms.upsert_peer(&info_hash, &peer, None); + let stats = swarms.get_swarm_metadata(&info_hash); assert_eq!( stats, Some(SwarmMetadata { @@ -456,19 +459,19 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: // Verify that this new peer was inserted into the repository. { - let lock_tracked_torrent = repo.get(&info_hash).expect("it_should_get_some"); + let lock_tracked_torrent = swarms.get(&info_hash).expect("it_should_get_some"); let entry = lock_tracked_torrent.lock_or_panic(); assert!(entry.peers(None).contains(&peer.into())); } // Remove peers that have not been updated since the timeout (120 seconds ago). { - repo.remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")); + swarms.remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")); } // Verify that the this peer was removed from the repository. 
{ - let lock_tracked_torrent = repo.get(&info_hash).expect("it_should_get_some"); + let lock_tracked_torrent = swarms.get(&info_hash).expect("it_should_get_some"); let entry = lock_tracked_torrent.lock_or_panic(); assert!(!entry.peers(None).contains(&peer.into())); } @@ -485,15 +488,15 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_remove_peerless_torrents( - #[values(skip_list_mutex_std())] repo: Swarms, + #[values(swarms())] swarms: Swarms, #[case] entries: Entries, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { - make(&repo, &entries); + make(&swarms, &entries); - repo.remove_peerless_torrents(&policy); + swarms.remove_peerless_torrents(&policy); - let torrents: Vec<(InfoHash, Swarm)> = repo + let torrents: Vec<(InfoHash, Swarm)> = swarms .get_paginated(None) .iter() .map(|(i, lock_tracked_torrent)| (*i, lock_tracked_torrent.lock_or_panic().clone())) diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 67e532e86..5902f6735 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -20,8 +20,8 @@ use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; /// used in production. Other implementations are kept for reference. #[derive(Debug, Default)] pub struct InMemoryTorrentRepository { - /// The underlying in-memory data structure that stores torrent entries. - torrents: Arc, + /// The underlying in-memory data structure that stores swarms data. 
+ swarms: Arc, } impl InMemoryTorrentRepository { @@ -46,7 +46,7 @@ impl InMemoryTorrentRepository { peer: &peer::Peer, opt_persistent_torrent: Option, ) -> bool { - self.torrents.upsert_peer(info_hash, peer, opt_persistent_torrent) + self.swarms.upsert_peer(info_hash, peer, opt_persistent_torrent) } /// Removes a torrent entry from the repository. @@ -65,7 +65,7 @@ impl InMemoryTorrentRepository { #[cfg(test)] #[must_use] pub(crate) fn remove(&self, key: &InfoHash) -> Option { - self.torrents.remove(key) + self.swarms.remove(key) } /// Removes inactive peers from all torrent entries. @@ -78,7 +78,7 @@ impl InMemoryTorrentRepository { /// * `current_cutoff` - The cutoff timestamp; peers not updated since this /// time will be removed. pub(crate) fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - self.torrents.remove_inactive_peers(current_cutoff); + self.swarms.remove_inactive_peers(current_cutoff); } /// Removes torrent entries that have no active peers. @@ -91,7 +91,7 @@ impl InMemoryTorrentRepository { /// * `policy` - The tracker policy containing the configuration for /// removing peerless torrents. pub(crate) fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - self.torrents.remove_peerless_torrents(policy); + self.swarms.remove_peerless_torrents(policy); } /// Retrieves a torrent entry by its infohash. @@ -105,7 +105,7 @@ impl InMemoryTorrentRepository { /// An `Option` containing the torrent entry if found. #[must_use] pub(crate) fn get(&self, key: &InfoHash) -> Option { - self.torrents.get(key) + self.swarms.get(key) } /// Retrieves a paginated list of torrent entries. @@ -123,7 +123,7 @@ impl InMemoryTorrentRepository { /// A vector of `(InfoHash, TorrentEntry)` tuples. #[must_use] pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, SwarmHandle)> { - self.torrents.get_paginated(pagination) + self.swarms.get_paginated(pagination) } /// Retrieves swarm metadata for a given torrent. 
@@ -141,7 +141,7 @@ impl InMemoryTorrentRepository { /// A `SwarmMetadata` struct containing the aggregated torrent data. #[must_use] pub(crate) fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata { - self.torrents.get_swarm_metadata_or_default(info_hash) + self.swarms.get_swarm_metadata_or_default(info_hash) } /// Retrieves torrent peers for a given torrent and client, excluding the @@ -163,7 +163,7 @@ impl InMemoryTorrentRepository { /// the torrent, excluding the requesting client. #[must_use] pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { - self.torrents.get_peers_for(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) + self.swarms.get_peers_for(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) } /// Retrieves the list of peers for a given torrent. @@ -186,7 +186,7 @@ impl InMemoryTorrentRepository { #[must_use] pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { // todo: pass the limit as an argument like `get_peers_for` - self.torrents.get_torrent_peers(info_hash, TORRENT_PEERS_LIMIT) + self.swarms.get_torrent_peers(info_hash, TORRENT_PEERS_LIMIT) } /// Calculates and returns overall torrent metrics. @@ -200,7 +200,7 @@ impl InMemoryTorrentRepository { /// A [`AggregateSwarmMetadata`] struct with the aggregated metrics. #[must_use] pub fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { - self.torrents.get_aggregate_swarm_metadata() + self.swarms.get_aggregate_swarm_metadata() } /// Imports persistent torrent data into the in-memory repository. @@ -212,6 +212,6 @@ impl InMemoryTorrentRepository { /// /// * `persistent_torrents` - A reference to the persisted torrent data. 
pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) {
-        self.torrents.import_persistent(persistent_torrents);
+        self.swarms.import_persistent(persistent_torrents);
     }
 }

From 6d50fa083cd334bfc1f23a96d3754e98ed6ae51b Mon Sep 17 00:00:00 2001
From: Jose Celano
Date: Wed, 7 May 2025 11:45:50 +0100
Subject: [PATCH 023/247] refactor: [#1495] remove panics from Swarms type

They have been moved one level up to the InMemoryTorrentRepository type.

We should bubble them up to the final user, returning an error in the UDP
or HTTP tracker when the swarm handle lock cannot be acquired. A new
issue will be opened to address that.
---
 Cargo.lock | 2 +-
 packages/torrent-repository/Cargo.toml | 2 +-
 packages/torrent-repository/src/lib.rs | 2 +-
 packages/torrent-repository/src/swarms.rs | 189 +++++++++++-------
 .../torrent-repository/tests/swarms/mod.rs | 25 ++-
 .../src/torrent/repository/in_memory.rs | 54 ++++-
 6 files changed, 182 insertions(+), 92 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index eea957f88..093b8e9b0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4850,12 +4850,12 @@ dependencies = [
 "crossbeam-skiplist",
 "rand 0.9.1",
 "rstest",
+ "thiserror 2.0.12",
 "tokio",
 "torrust-tracker-clock",
 "torrust-tracker-configuration",
 "torrust-tracker-primitives",
 "torrust-tracker-test-helpers",
- "tracing",
 ]

 [[package]]
diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml
index e584fadf4..2cc02a720 100644
--- a/packages/torrent-repository/Cargo.toml
+++ b/packages/torrent-repository/Cargo.toml
@@ -19,11 +19,11 @@ version.workspace = true
 aquatic_udp_protocol = "0"
 bittorrent-primitives = "0.1.0"
 crossbeam-skiplist = "0"
+thiserror = "2.0.12"
 tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] }
 torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" }
 torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" }
 torrust-tracker-primitives 
= { version = "3.0.0-develop", path = "../primitives" }
-tracing = "0"

[dev-dependencies]
async-std = { version = "1", features = ["attributes", "tokio1"] }
diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs
index c985f7a2b..a4e7d9c5d 100644
--- a/packages/torrent-repository/src/lib.rs
+++ b/packages/torrent-repository/src/lib.rs
@@ -23,7 +23,7 @@ pub trait LockTrackedTorrent {
 fn lock_or_panic(&self) -> MutexGuard<'_, Swarm>;
 }

-impl LockTrackedTorrent for Arc> {
+impl LockTrackedTorrent for SwarmHandle {
 fn lock_or_panic(&self) -> MutexGuard<'_, Swarm> {
 self.lock().expect("can't acquire lock for tracked torrent handle")
 }
diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs
index 936f49d22..222bea60a 100644
--- a/packages/torrent-repository/src/swarms.rs
+++ b/packages/torrent-repository/src/swarms.rs
@@ -8,7 +8,7 @@ use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMe
 use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents};

 use crate::swarm::Swarm;
-use crate::{LockTrackedTorrent, SwarmHandle};
+use crate::SwarmHandle;

 #[derive(Default, Debug)]
 pub struct Swarms {
@@ -34,33 +34,31 @@ impl Swarms {
 /// Returns `true` if the number of downloads was increased because the peer
 /// completed the download.
 ///
- /// # Panics
+ /// # Errors
 ///
- /// This function panics if the lock for the entry cannot be obtained.
+ /// This function returns an error if the lock for the swarm handle cannot be acquired. 
pub fn upsert_peer( &self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option, - ) -> bool { - if let Some(existing_entry) = self.swarms.get(info_hash) { - tracing::debug!("Torrent already exists: {:?}", info_hash); + ) -> Result { + if let Some(existing_swarm_handle) = self.swarms.get(info_hash) { + let mut swarm = existing_swarm_handle.value().lock()?; - existing_entry.value().lock_or_panic().handle_announcement(peer) + Ok(swarm.handle_announcement(peer)) } else { - tracing::debug!("Inserting new torrent: {:?}", info_hash); - - let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { + let new_swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { SwarmHandle::new(Swarm::new(number_of_downloads).into()) } else { SwarmHandle::default() }; - let inserted_entry = self.swarms.get_or_insert(*info_hash, new_entry); + let inserted_swarm_handle = self.swarms.get_or_insert(*info_hash, new_swarm_handle); - let mut torrent_guard = inserted_entry.value().lock_or_panic(); + let mut swarm = inserted_swarm_handle.value().lock()?; - torrent_guard.handle_announcement(peer) + Ok(swarm.handle_announcement(peer)) } } @@ -79,13 +77,17 @@ impl Swarms { /// A peer is considered inactive if its last update timestamp is older than /// the provided cutoff time. /// - /// # Panics + /// # Errors /// - /// This function panics if the lock for the entry cannot be obtained. - pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - for entry in &self.swarms { - entry.value().lock_or_panic().remove_inactive(current_cutoff); + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. + pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result<(), Error> { + for swarm_handle in &self.swarms { + let mut swarm = swarm_handle.value().lock()?; + swarm.remove_inactive(current_cutoff); } + + Ok(()) } /// Retrieves a tracked torrent handle by its infohash. 
@@ -132,14 +134,17 @@ impl Swarms {
 ///
 /// A `SwarmMetadata` struct containing the aggregated torrent data if found.
 ///
- /// # Panics
+ /// # Errors
 ///
- /// This function panics if the lock for the entry cannot be obtained.
- #[must_use]
- pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option {
- self.swarms
- .get(info_hash)
- .map(|entry| entry.value().lock_or_panic().metadata())
+ /// This function returns an error if the lock for the swarm handle cannot be acquired.
+ pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Result, Error> {
+ match self.swarms.get(info_hash) {
+ None => Ok(None),
+ Some(swarm_handle) => {
+ let swarm = swarm_handle.value().lock()?;
+ Ok(Some(swarm.metadata()))
+ }
+ }
 }

 /// Retrieves swarm metadata for a given torrent.
@@ -148,11 +153,16 @@ impl Swarms {
 ///
 /// A `SwarmMetadata` struct containing the aggregated torrent data if it's
 /// found or a zeroed metadata struct if not.
- #[must_use]
- pub fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata {
+ ///
+ /// # Errors
+ ///
+ /// This function returns an error if it fails to acquire the lock for the
+ /// swarm handle.
+ pub fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> Result {
 match self.get_swarm_metadata(info_hash) {
- Some(swarm_metadata) => swarm_metadata,
- None => SwarmMetadata::zeroed(),
+ Ok(Some(swarm_metadata)) => Ok(swarm_metadata),
+ Ok(None) => Ok(SwarmMetadata::zeroed()),
+ Err(err) => Err(err),
 }
 }
@@ -168,14 +178,17 @@
 /// A vector of peers (wrapped in `Arc`) representing the active peers for
 /// the torrent, excluding the requesting client.
 ///
- /// # Panics
+ /// # Errors
 ///
- /// This function panics if the lock for the torrent entry cannot be obtained.
- #[must_use]
- pub fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> {
+ /// This function returns an error if it fails to acquire the lock for the
+ /// swarm handle. 
+ pub fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Result>, Error> { match self.get(info_hash) { - None => vec![], - Some(entry) => entry.lock_or_panic().peers_excluding(&peer.peer_addr, Some(limit)), + None => Ok(vec![]), + Some(swarm_handle) => { + let swarm = swarm_handle.lock()?; + Ok(swarm.peers_excluding(&peer.peer_addr, Some(limit))) + } } } @@ -189,14 +202,17 @@ impl Swarms { /// A vector of peers (wrapped in `Arc`) representing the active peers for /// the torrent. /// - /// # Panics + /// # Errors /// - /// This function panics if the lock for the torrent entry cannot be obtained. - #[must_use] - pub fn get_torrent_peers(&self, info_hash: &InfoHash, limit: usize) -> Vec> { + /// This function returns an error if it fails to acquire the lock for the + /// swarm handle. + pub fn get_torrent_peers(&self, info_hash: &InfoHash, limit: usize) -> Result>, Error> { match self.get(info_hash) { - None => vec![], - Some(entry) => entry.lock_or_panic().peers(Some(limit)), + None => Ok(vec![]), + Some(swarm_handle) => { + let swarm = swarm_handle.lock()?; + Ok(swarm.peers(Some(limit))) + } } } @@ -205,17 +221,22 @@ impl Swarms { /// Depending on the tracker policy, torrents without any peers may be /// removed to conserve memory. /// - /// # Panics + /// # Errors /// - /// This function panics if the lock for the entry cannot be obtained. - pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - for entry in &self.swarms { - if entry.value().lock_or_panic().meets_retaining_policy(policy) { + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. 
+ pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> Result<(), Error> { + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock()?; + + if swarm.meets_retaining_policy(policy) { continue; } - entry.remove(); + swarm_handle.remove(); } + + Ok(()) } /// Imports persistent torrent data into the in-memory repository. @@ -247,22 +268,35 @@ impl Swarms { /// /// A [`AggregateSwarmMetadata`] struct with the aggregated metrics. /// - /// # Panics + /// # Errors /// - /// This function panics if the lock for the entry cannot be obtained. - #[must_use] - pub fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. + pub fn get_aggregate_swarm_metadata(&self) -> Result { let mut metrics = AggregateSwarmMetadata::default(); for entry in &self.swarms { - let stats = entry.value().lock_or_panic().metadata(); + let swarm = entry.value().lock()?; + let stats = swarm.metadata(); metrics.total_complete += u64::from(stats.complete); metrics.total_downloaded += u64::from(stats.downloaded); metrics.total_incomplete += u64::from(stats.incomplete); metrics.total_torrents += 1; } - metrics + Ok(metrics) + } +} + +#[derive(thiserror::Error, Debug, Clone)] +pub enum Error { + #[error("Can't acquire swarm lock")] + CannotAcquireSwarmLock, +} + +impl From>> for Error { + fn from(_error: std::sync::PoisonError>) -> Self { + Error::CannotAcquireSwarmLock } } @@ -354,7 +388,7 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); - let peers = torrent_repository.get_torrent_peers(&info_hash, 74); + let peers = torrent_repository.get_torrent_peers(&info_hash, 74).unwrap(); assert_eq!(peers, vec![Arc::new(peer)]); } @@ -363,7 +397,7 @@ mod tests { async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { let torrent_repository = Arc::new(Swarms::default()); - let peers = 
torrent_repository.get_torrent_peers(&sample_info_hash(), 74); + let peers = torrent_repository.get_torrent_peers(&sample_info_hash(), 74).unwrap(); assert!(peers.is_empty()); } @@ -388,7 +422,7 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); } - let peers = torrent_repository.get_torrent_peers(&info_hash, 74); + let peers = torrent_repository.get_torrent_peers(&info_hash, 74).unwrap(); assert_eq!(peers.len(), 74); } @@ -411,7 +445,9 @@ mod tests { async fn it_should_return_an_empty_peer_list_for_a_non_existing_torrent() { let torrent_repository = Arc::new(Swarms::default()); - let peers = torrent_repository.get_peers_for(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT); + let peers = torrent_repository + .get_peers_for(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT) + .unwrap(); assert_eq!(peers, vec![]); } @@ -425,7 +461,9 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); - let peers = torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); + let peers = torrent_repository + .get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT) + .unwrap(); assert_eq!(peers, vec![]); } @@ -455,7 +493,9 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); } - let peers = torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); + let peers = torrent_repository + .get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT) + .unwrap(); assert_eq!(peers.len(), 74); } @@ -498,9 +538,14 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); // Cut off time is 1 second after the peer was updated - torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + torrent_repository + .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .unwrap(); - 
assert!(!torrent_repository.get_torrent_peers(&info_hash, 74).contains(&Arc::new(peer))); + assert!(!torrent_repository + .get_torrent_peers(&info_hash, 74) + .unwrap() + .contains(&Arc::new(peer))); } fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { @@ -512,7 +557,9 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(info_hash, &peer, None); // Remove the peer - torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + torrent_repository + .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .unwrap(); torrent_repository } @@ -528,7 +575,7 @@ mod tests { ..Default::default() }; - torrent_repository.remove_peerless_torrents(&tracker_policy); + torrent_repository.remove_peerless_torrents(&tracker_policy).unwrap(); assert!(torrent_repository.get(&info_hash).is_none()); } @@ -755,7 +802,7 @@ mod tests { async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { let torrent_repository = Arc::new(Swarms::default()); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -774,7 +821,7 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &leecher(), None); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -793,7 +840,7 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &seeder(), None); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -812,7 
+859,7 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer(), None); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -837,7 +884,7 @@ mod tests { let result_a = start_time.elapsed(); let start_time = std::time::Instant::now(); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); let result_b = start_time.elapsed(); assert_eq!( @@ -870,7 +917,7 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&infohash, &leecher(), None); - let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&infohash); + let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&infohash).unwrap(); assert_eq!( swarm_metadata, @@ -886,7 +933,7 @@ mod tests { async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { let torrent_repository = Arc::new(Swarms::default()); - let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&sample_info_hash()); + let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&sample_info_hash()).unwrap(); assert_eq!(swarm_metadata, SwarmMetadata::zeroed()); } @@ -913,7 +960,7 @@ mod tests { torrent_repository.import_persistent(&persistent_torrents); - let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&infohash); + let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&infohash).unwrap(); // Only the number of downloads is persisted. 
assert_eq!(swarm_metadata.downloaded, 1); diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs index 20c6255fa..82247bfcb 100644 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -329,7 +329,7 @@ async fn it_should_get_metrics(#[values(swarms())] swarms: Swarms, #[case] entri metrics.total_downloaded += u64::from(stats.downloaded); } - assert_eq!(swarms.get_aggregate_swarm_metadata(), metrics); + assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap(), metrics); } #[rstest] @@ -349,12 +349,12 @@ async fn it_should_import_persistent_torrents( ) { make(&swarms, &entries); - let mut downloaded = swarms.get_aggregate_swarm_metadata().total_downloaded; + let mut downloaded = swarms.get_aggregate_swarm_metadata().unwrap().total_downloaded; persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); swarms.import_persistent(&persistent_torrents); - assert_eq!(swarms.get_aggregate_swarm_metadata().total_downloaded, downloaded); + assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap().total_downloaded, downloaded); for (entry, _) in persistent_torrents { assert!(swarms.get(&entry).is_some()); @@ -388,7 +388,7 @@ async fn it_should_remove_an_entry(#[values(swarms())] swarms: Swarms, #[case] e assert!(swarms.remove(&info_hash).is_none()); } - assert_eq!(swarms.get_aggregate_swarm_metadata().total_torrents, 0); + assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, 0); } #[rstest] @@ -438,15 +438,18 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Insert the infohash and peer into the repository // and verify there is an extra torrent entry. 
{ - swarms.upsert_peer(&info_hash, &peer, None); - assert_eq!(swarms.get_aggregate_swarm_metadata().total_torrents, entries.len() as u64 + 1); + swarms.upsert_peer(&info_hash, &peer, None).unwrap(); + assert_eq!( + swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, + entries.len() as u64 + 1 + ); } // Insert the infohash and peer into the repository // and verify the swarm metadata was updated. { - swarms.upsert_peer(&info_hash, &peer, None); - let stats = swarms.get_swarm_metadata(&info_hash); + swarms.upsert_peer(&info_hash, &peer, None).unwrap(); + let stats = swarms.get_swarm_metadata(&info_hash).unwrap(); assert_eq!( stats, Some(SwarmMetadata { @@ -466,7 +469,9 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Remove peers that have not been updated since the timeout (120 seconds ago). { - swarms.remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")); + swarms + .remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")) + .unwrap(); } // Verify that the this peer was removed from the repository. @@ -494,7 +499,7 @@ async fn it_should_remove_peerless_torrents( ) { make(&swarms, &entries); - swarms.remove_peerless_torrents(&policy); + swarms.remove_peerless_torrents(&policy).unwrap(); let torrents: Vec<(InfoHash, Swarm)> = swarms .get_paginated(None) diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 5902f6735..8c93f3605 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -39,6 +39,10 @@ impl InMemoryTorrentRepository { /// # Returns /// /// `true` if the peer stats were updated. + /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. 
#[must_use] pub fn upsert_peer( &self, @@ -46,7 +50,9 @@ impl InMemoryTorrentRepository { peer: &peer::Peer, opt_persistent_torrent: Option, ) -> bool { - self.swarms.upsert_peer(info_hash, peer, opt_persistent_torrent) + self.swarms + .upsert_peer(info_hash, peer, opt_persistent_torrent) + .expect("Failed to upsert the peer in swarms") } /// Removes a torrent entry from the repository. @@ -77,8 +83,14 @@ /// /// * `current_cutoff` - The cutoff timestamp; peers not updated since this /// time will be removed. + /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. pub(crate) fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - self.swarms.remove_inactive_peers(current_cutoff); + self.swarms + .remove_inactive_peers(current_cutoff) + .expect("Failed to remove inactive peers from swarms"); } /// Removes torrent entries that have no active peers. @@ -90,8 +102,14 @@ /// /// * `policy` - The tracker policy containing the configuration for /// removing peerless torrents. + /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. pub(crate) fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - self.swarms.remove_peerless_torrents(policy); + self.swarms + .remove_peerless_torrents(policy) + .expect("Failed to remove peerless torrents from swarms"); } /// Retrieves a torrent entry by its infohash. @@ -139,9 +157,15 @@ impl InMemoryTorrentRepository { /// # Returns /// /// A `SwarmMetadata` struct containing the aggregated torrent data. 
+ /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. #[must_use] pub(crate) fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata { - self.swarms.get_swarm_metadata_or_default(info_hash) + self.swarms + .get_swarm_metadata_or_default(info_hash) + .expect("Failed to get swarm metadata") } /// Retrieves torrent peers for a given torrent and client, excluding the @@ -161,9 +185,15 @@ impl InMemoryTorrentRepository { /// /// A vector of peers (wrapped in `Arc`) representing the active peers for /// the torrent, excluding the requesting client. + /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. #[must_use] pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { - self.swarms.get_peers_for(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) + self.swarms + .get_peers_for(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) + .expect("Failed to get other peers in swarm") } /// Retrieves the list of peers for a given torrent. @@ -182,11 +212,13 @@ impl InMemoryTorrentRepository { /// /// # Panics /// - /// This function panics if the lock for the torrent entry cannot be obtained. + /// This function panics if the underlying swarms return an error. #[must_use] pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { // todo: pass the limit as an argument like `get_peers_for` - self.swarms.get_torrent_peers(info_hash, TORRENT_PEERS_LIMIT) + self.swarms + .get_torrent_peers(info_hash, TORRENT_PEERS_LIMIT) + .expect("Failed to get other peers in swarm") } /// Calculates and returns overall torrent metrics. /// @@ -198,9 +230,15 @@ impl InMemoryTorrentRepository { /// # Returns /// /// A [`AggregateSwarmMetadata`] struct with the aggregated metrics. + /// + /// # Panics + /// + /// This function panics if the underlying swarms return an error. 
#[must_use] pub fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { - self.swarms.get_aggregate_swarm_metadata() + self.swarms + .get_aggregate_swarm_metadata() + .expect("Failed to get aggregate swarm metadata") } /// Imports persistent torrent data into the in-memory repository. From 31f1fbf32216fbb1f1fc43c5c103af44e25bb462 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 12:07:15 +0100 Subject: [PATCH 024/247] refactor: [#1495] make field private It was public only to allow setting a pre-defined state in tests. A new public method has been added temporarily to explain its usage. --- packages/torrent-repository/src/swarms.rs | 14 +++++++++++--- packages/torrent-repository/tests/swarms/mod.rs | 9 +++------ 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 222bea60a..34cd52d3b 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use bittorrent_primitives::info_hash::InfoHash; use crossbeam_skiplist::SkipMap; @@ -12,8 +12,7 @@ use crate::SwarmHandle; #[derive(Default, Debug)] pub struct Swarms { - // todo: this needs to be public only to insert a peerless torrent (empty swarm). - pub swarms: SkipMap, + swarms: SkipMap, } impl Swarms { @@ -62,6 +61,15 @@ impl Swarms { } } + /// Inserts a new swarm. It's only used for testing purposes. It allows to + /// pre-define the initial state of the swarm without having to go through + /// the upsert process. + pub fn insert_swarm(&self, info_hash: &InfoHash, swarm: Swarm) { + // code-review: swarms builder? + let swarm_handle = Arc::new(Mutex::new(swarm)); + self.swarms.insert(*info_hash, swarm_handle); + } + /// Removes a torrent entry from the repository. 
/// /// # Returns diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs index 82247bfcb..43571eb83 100644 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -1,6 +1,5 @@ use std::collections::{BTreeMap, HashSet}; use std::hash::{DefaultHasher, Hash, Hasher}; -use std::sync::{Arc, Mutex}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use bittorrent_primitives::info_hash::InfoHash; @@ -148,11 +147,9 @@ fn persistent_three() -> PersistentTorrents { t.iter().copied().collect() } -fn make(repo: &Swarms, entries: &Entries) { - for (info_hash, entry) in entries { - let new = Arc::new(Mutex::new(entry.clone())); - // todo: use a public method to insert an empty swarm. - repo.swarms.insert(*info_hash, new); +fn make(swarms: &Swarms, entries: &Entries) { + for (info_hash, swarm) in entries { + swarms.insert_swarm(info_hash, swarm.clone()); } } From 5c2c1e0f77c767a945823fb3abf7091caeb17129 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 12:13:42 +0100 Subject: [PATCH 025/247] feat: [#1495] add len and is_empty methods to Swarms type --- packages/torrent-repository/src/swarms.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 34cd52d3b..a03b9d7e6 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -294,6 +294,16 @@ impl Swarms { Ok(metrics) } + + #[must_use] + pub fn len(&self) -> usize { + self.swarms.len() + } + + #[must_use] + pub fn is_empty(&self) -> bool { + self.swarms.is_empty() + } } #[derive(thiserror::Error, Debug, Clone)] From 5b3142f6bae735750aa0ebead74b3587bb441f01 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 12:17:35 +0100 Subject: [PATCH 026/247] refactor: [#1495] refactor Swarms::upsert_peer --- packages/torrent-repository/src/swarms.rs | 
20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index a03b9d7e6..fb6652ba5 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -42,23 +42,17 @@ impl Swarms { peer: &peer::Peer, opt_persistent_torrent: Option, ) -> Result { - if let Some(existing_swarm_handle) = self.swarms.get(info_hash) { - let mut swarm = existing_swarm_handle.value().lock()?; - - Ok(swarm.handle_announcement(peer)) + let swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { + SwarmHandle::new(Swarm::new(number_of_downloads).into()) } else { - let new_swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { - SwarmHandle::new(Swarm::new(number_of_downloads).into()) - } else { - SwarmHandle::default() - }; + SwarmHandle::default() + }; - let inserted_swarm_handle = self.swarms.get_or_insert(*info_hash, new_swarm_handle); + let swarm_handle = self.swarms.get_or_insert(*info_hash, swarm_handle); - let mut swarm = inserted_swarm_handle.value().lock()?; + let mut swarm = swarm_handle.value().lock()?; - Ok(swarm.handle_announcement(peer)) - } + Ok(swarm.handle_announcement(peer)) } /// Inserts a new swarm. It's only used for testing purposes. 
It allows to From 4d91738d05cc2220ebdea4cb512badbf1809074f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 12:58:21 +0100 Subject: [PATCH 027/247] refactor: [#1495] renamings in torrent-repository pkg --- packages/torrent-repository/src/swarms.rs | 189 +++++++++--------- .../torrent-repository/tests/swarms/mod.rs | 6 +- .../src/torrent/repository/in_memory.rs | 6 +- 3 files changed, 102 insertions(+), 99 deletions(-) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index fb6652ba5..828e8c030 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -36,7 +36,7 @@ impl Swarms { /// # Errors /// /// This function panics if the lock for the swarm handle cannot be acquired. - pub fn upsert_peer( + pub fn handle_announcement( &self, info_hash: &InfoHash, peer: &peer::Peer, @@ -55,11 +55,13 @@ impl Swarms { Ok(swarm.handle_announcement(peer)) } - /// Inserts a new swarm. It's only used for testing purposes. It allows to - /// pre-define the initial state of the swarm without having to go through - /// the upsert process. - pub fn insert_swarm(&self, info_hash: &InfoHash, swarm: Swarm) { + /// Inserts a new swarm. + pub fn insert(&self, info_hash: &InfoHash, swarm: Swarm) { // code-review: swarms builder? + // It's only used for testing purposes. It allows to pre-define the + // initial state of the swarm without having to go through the upsert + // process. + let swarm_handle = Arc::new(Mutex::new(swarm)); self.swarms.insert(*info_hash, swarm_handle); } @@ -184,7 +186,12 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for the /// swarm handle. 
- pub fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Result>, Error> { + pub fn get_peers_peers_excluding( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + limit: usize, + ) -> Result>, Error> { match self.get(info_hash) { None => Ok(vec![]), Some(swarm_handle) => { @@ -208,7 +215,7 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for the /// swarm handle. - pub fn get_torrent_peers(&self, info_hash: &InfoHash, limit: usize) -> Result>, Error> { + pub fn get_swarm_peers(&self, info_hash: &InfoHash, limit: usize) -> Result>, Error> { match self.get(info_hash) { None => Ok(vec![]), Some(swarm_handle) => { @@ -356,25 +363,25 @@ mod tests { #[tokio::test] async fn it_should_add_the_first_peer_to_the_torrent_peer_list() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); - assert!(torrent_repository.get(&info_hash).is_some()); + assert!(swarms.get(&info_hash).is_some()); } #[tokio::test] async fn it_should_allow_adding_the_same_peer_twice_to_the_torrent_peer_list() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); - assert!(torrent_repository.get(&info_hash).is_some()); + assert!(swarms.get(&info_hash).is_some()); 
} } @@ -393,30 +400,30 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); - let peers = torrent_repository.get_torrent_peers(&info_hash, 74).unwrap(); + let peers = swarms.get_swarm_peers(&info_hash, 74).unwrap(); assert_eq!(peers, vec![Arc::new(peer)]); } #[tokio::test] async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); - let peers = torrent_repository.get_torrent_peers(&sample_info_hash(), 74).unwrap(); + let peers = swarms.get_swarm_peers(&sample_info_hash(), 74).unwrap(); assert!(peers.is_empty()); } #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); @@ -431,10 +438,10 @@ mod tests { event: AnnounceEvent::Completed, }; - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); } - let peers = torrent_repository.get_torrent_peers(&info_hash, 74).unwrap(); + let peers = swarms.get_swarm_peers(&info_hash, 74).unwrap(); assert_eq!(peers.len(), 74); } @@ -455,10 +462,10 @@ mod tests { #[tokio::test] async fn it_should_return_an_empty_peer_list_for_a_non_existing_torrent() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); - let peers = torrent_repository - .get_peers_for(&sample_info_hash(), &sample_peer(), 
TORRENT_PEERS_LIMIT) + let peers = swarms + .get_peers_peers_excluding(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT) .unwrap(); assert_eq!(peers, vec![]); @@ -466,15 +473,15 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); - let peers = torrent_repository - .get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT) + let peers = swarms + .get_peers_peers_excluding(&info_hash, &peer, TORRENT_PEERS_LIMIT) .unwrap(); assert_eq!(peers, vec![]); @@ -482,13 +489,13 @@ mod tests { #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let excluded_peer = sample_peer(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &excluded_peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &excluded_peer, None); // Add 74 peers for idx in 2..=75 { @@ -502,11 +509,11 @@ mod tests { event: AnnounceEvent::Completed, }; - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); } - let peers = torrent_repository - .get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT) + let peers = swarms + .get_peers_peers_excluding(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT) .unwrap(); assert_eq!(peers.len(), 74); @@ -529,67 +536,64 @@ mod tests { #[tokio::test] async fn 
it_should_remove_a_torrent_entry() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); - let _unused = torrent_repository.remove(&info_hash); + let _unused = swarms.remove(&info_hash); - assert!(torrent_repository.get(&info_hash).is_none()); + assert!(swarms.get(&info_hash).is_none()); } #[tokio::test] async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); // Cut off time is 1 second after the peer was updated - torrent_repository + swarms .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) .unwrap(); - assert!(!torrent_repository - .get_torrent_peers(&info_hash, 74) - .unwrap() - .contains(&Arc::new(peer))); + assert!(!swarms.get_swarm_peers(&info_hash, 74).unwrap().contains(&Arc::new(peer))); } fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); // Insert a sample peer for the torrent to force adding the torrent entry let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = torrent_repository.upsert_peer(info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(info_hash, &peer, None); // Remove the peer - 
torrent_repository + swarms .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) .unwrap(); - torrent_repository + swarms } #[tokio::test] async fn it_should_remove_torrents_without_peers() { let info_hash = sample_info_hash(); - let torrent_repository = initialize_repository_with_one_torrent_without_peers(&info_hash); + let swarms = initialize_repository_with_one_torrent_without_peers(&info_hash); let tracker_policy = TrackerPolicy { remove_peerless_torrents: true, ..Default::default() }; - torrent_repository.remove_peerless_torrents(&tracker_policy).unwrap(); + swarms.remove_peerless_torrents(&tracker_policy).unwrap(); - assert!(torrent_repository.get(&info_hash).is_none()); + assert!(swarms.get(&info_hash).is_none()); } } mod returning_torrent_entries { @@ -632,14 +636,14 @@ mod tests { #[tokio::test] async fn it_should_return_one_torrent_entry_by_infohash() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); - let torrent_entry = torrent_repository.get(&info_hash).unwrap(); + let torrent_entry = swarms.get(&info_hash).unwrap(); assert_eq!( TorrentEntryInfo { @@ -666,13 +670,13 @@ mod tests { #[tokio::test] async fn without_pagination() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); - let torrent_entries = torrent_repository.get_paginated(None); + let torrent_entries = swarms.get_paginated(None); assert_eq!(torrent_entries.len(), 1); @@ -707,20 +711,20 @@ 
mod tests { #[tokio::test] async fn it_should_return_the_first_page() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); // Get only the first page where page size is 1 - let torrent_entries = torrent_repository.get_paginated(Some(&Pagination { offset: 0, limit: 1 })); + let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 0, limit: 1 })); assert_eq!(torrent_entries.len(), 1); @@ -742,20 +746,20 @@ mod tests { #[tokio::test] async fn it_should_return_the_second_page() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); // Get only the 
first page where page size is 1 - let torrent_entries = torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); + let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); assert_eq!(torrent_entries.len(), 1); @@ -777,20 +781,20 @@ mod tests { #[tokio::test] async fn it_should_allow_changing_the_page_size() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); // Get only the first page where page size is 1 - let torrent_entries = torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); + let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); assert_eq!(torrent_entries.len(), 1); } @@ -812,9 +816,9 @@ mod tests { #[tokio::test] async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -829,11 +833,11 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() { - let 
torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &leecher(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &leecher(), None); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -848,11 +852,11 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &seeder(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &seeder(), None); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -867,11 +871,11 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &complete_peer(), None); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -886,17 +890,16 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_are_multiple_torrents() { 
- let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let start_time = std::time::Instant::now(); for i in 0..1_000_000 { - let _number_of_downloads_increased = - torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&gen_seeded_infohash(&i), &leecher(), None); } let result_a = start_time.elapsed(); let start_time = std::time::Instant::now(); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); let result_b = start_time.elapsed(); assert_eq!( @@ -923,13 +926,13 @@ mod tests { #[tokio::test] async fn it_should_get_swarm_metadata_for_an_existing_torrent() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let infohash = sample_info_hash(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&infohash, &leecher(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&infohash, &leecher(), None); - let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&infohash).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).unwrap(); assert_eq!( swarm_metadata, @@ -943,9 +946,9 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); - let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&sample_info_hash()).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&sample_info_hash()).unwrap(); assert_eq!(swarm_metadata, SwarmMetadata::zeroed()); } @@ -962,7 +965,7 @@ mod tests { #[tokio::test] async fn it_should_allow_importing_persisted_torrent_entries() { - let torrent_repository = Arc::new(Swarms::default()); + let 
swarms = Arc::new(Swarms::default()); let infohash = sample_info_hash(); @@ -970,9 +973,9 @@ mod tests { persistent_torrents.insert(infohash, 1); - torrent_repository.import_persistent(&persistent_torrents); + swarms.import_persistent(&persistent_torrents); - let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&infohash).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).unwrap(); // Only the number of downloads is persisted. assert_eq!(swarm_metadata.downloaded, 1); diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs index 43571eb83..8e58b9e76 100644 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -149,7 +149,7 @@ fn persistent_three() -> PersistentTorrents { fn make(swarms: &Swarms, entries: &Entries) { for (info_hash, swarm) in entries { - swarms.insert_swarm(info_hash, swarm.clone()); + swarms.insert(info_hash, swarm.clone()); } } @@ -435,7 +435,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Insert the infohash and peer into the repository // and verify there is an extra torrent entry. { - swarms.upsert_peer(&info_hash, &peer, None).unwrap(); + swarms.handle_announcement(&info_hash, &peer, None).unwrap(); assert_eq!( swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, entries.len() as u64 + 1 @@ -445,7 +445,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Insert the infohash and peer into the repository // and verify the swarm metadata was updated. 
{ - swarms.upsert_peer(&info_hash, &peer, None).unwrap(); + swarms.handle_announcement(&info_hash, &peer, None).unwrap(); let stats = swarms.get_swarm_metadata(&info_hash).unwrap(); assert_eq!( stats, diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 8c93f3605..38593bf3c 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -51,7 +51,7 @@ impl InMemoryTorrentRepository { opt_persistent_torrent: Option, ) -> bool { self.swarms - .upsert_peer(info_hash, peer, opt_persistent_torrent) + .handle_announcement(info_hash, peer, opt_persistent_torrent) .expect("Failed to upsert the peer in swarms") } @@ -192,7 +192,7 @@ impl InMemoryTorrentRepository { #[must_use] pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { self.swarms - .get_peers_for(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) + .get_peers_peers_excluding(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) .expect("Failed to get other peers in swarm") } @@ -217,7 +217,7 @@ impl InMemoryTorrentRepository { pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { // todo: pass the limit as an argument like `get_peers_for` self.swarms - .get_torrent_peers(info_hash, TORRENT_PEERS_LIMIT) + .get_swarm_peers(info_hash, TORRENT_PEERS_LIMIT) .expect("Failed to get other peers in swarm") } From 4b5e914ad90c7a36552574eca65600c69c24e3f6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 14:20:45 +0100 Subject: [PATCH 028/247] chore(deps): update dependencies ```output cargo update Updating crates.io index Locking 15 packages to latest compatible versions Updating backtrace v0.3.74 -> v0.3.75 Updating brotli v8.0.0 -> v8.0.1 Updating docker_credential v1.3.1 -> v1.3.2 Updating etcetera v0.8.0 -> v0.10.0 Updating h2 v0.4.9 -> v0.4.10 Updating hermit-abi v0.5.0 -> v0.5.1 Updating libm 
v0.2.13 -> v0.2.15 Updating local-ip-address v0.6.4 -> v0.6.5 Updating redox_syscall v0.5.11 -> v0.5.12 Updating rustls v0.23.26 -> v0.23.27 Updating rustls-pki-types v1.11.0 -> v1.12.0 Updating rustls-webpki v0.103.1 -> v0.103.2 Updating testcontainers v0.23.3 -> v0.24.0 Updating tokio v1.44.2 -> v1.45.0 Removing windows-sys v0.48.0 Removing windows-targets v0.48.5 Removing windows_aarch64_gnullvm v0.48.5 Removing windows_aarch64_msvc v0.48.5 Removing windows_i686_gnu v0.48.5 Removing windows_i686_msvc v0.48.5 Removing windows_x86_64_gnu v0.48.5 Removing windows_x86_64_gnullvm v0.48.5 Removing windows_x86_64_msvc v0.48.5 Updating winnow v0.7.8 -> v0.7.10 ``` --- Cargo.lock | 137 +++++++++++++++-------------------------------------- 1 file changed, 37 insertions(+), 100 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 093b8e9b0..80f98db36 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -482,9 +482,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" dependencies = [ "addr2line", "cfg-if", @@ -849,9 +849,9 @@ dependencies = [ [[package]] name = "brotli" -version = "8.0.0" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf19e729cdbd51af9a397fb9ef8ac8378007b797f8273cfbfdf45dcaa316167b" +checksum = "9991eea70ea4f293524138648e41ee89b0b2b12ddef3b255effa43c8056e0e0d" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1407,9 +1407,9 @@ dependencies = [ [[package]] name = "docker_credential" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31951f49556e34d90ed28342e1df7e1cb7a229c4cab0aecc627b5d91edd41d07" +checksum = "1d89dfcba45b4afad7450a99b39e751590463e45c04728cf555d36bb66940de8" dependencies 
= [ "base64 0.21.7", "serde", @@ -1465,13 +1465,13 @@ dependencies = [ [[package]] name = "etcetera" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +checksum = "26c7b13d0780cb82722fd59f6f57f925e143427e4a75313a6c77243bf5326ae6" dependencies = [ "cfg-if", "home", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -1859,9 +1859,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75249d144030531f8dee69fe9cea04d3edf809a017ae445e2abdff6629e86633" +checksum = "a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5" dependencies = [ "atomic-waker", "bytes", @@ -1935,9 +1935,9 @@ checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" [[package]] name = "hermit-abi" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e" +checksum = "f154ce46856750ed433c8649605bf7ed2de3bc35fd9d2a9f30cddd873c80cb08" [[package]] name = "hex" @@ -2337,7 +2337,7 @@ version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ - "hermit-abi 0.5.0", + "hermit-abi 0.5.1", "libc", "windows-sys 0.59.0", ] @@ -2431,9 +2431,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9627da5196e5d8ed0b0495e61e518847578da83483c37288316d9b2e03a7f72" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libredox" @@ -2443,7 +2443,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ 
"bitflags 2.9.0", "libc", - "redox_syscall 0.5.11", + "redox_syscall 0.5.12", ] [[package]] @@ -2488,9 +2488,9 @@ checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "local-ip-address" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c986b1747bbd3666abe4d57c64e60e6a82c2216140d8b12d5ceb33feb9de44b3" +checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" dependencies = [ "libc", "neli", @@ -2929,7 +2929,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.11", + "redox_syscall 0.5.12", "smallvec", "windows-targets 0.52.6", ] @@ -3401,9 +3401,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2f103c6d277498fbceb16e84d317e2a400f160f46904d5f5410848c829511a3" +checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" dependencies = [ "bitflags 2.9.0", ] @@ -3659,9 +3659,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.26" +version = "0.23.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df51b5869f3a441595eac5e8ff14d486ff285f7b8c0df8770e49c3b56351f0f0" +checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" dependencies = [ "once_cell", "ring", @@ -3694,15 +3694,18 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "zeroize", +] [[package]] name = "rustls-webpki" -version = "0.103.1" +version = "0.103.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" +checksum = "7149975849f1abb3832b246010ef62ccc80d3a76169517ada7188252b9cfb437" dependencies = [ "ring", "rustls-pki-types", @@ -4232,9 +4235,9 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "testcontainers" -version = "0.23.3" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a4f01f39bb10fc2a5ab23eb0d888b1e2bb168c157f61a1b98e6c501c639c74" +checksum = "23bb7577dca13ad86a78e8271ef5d322f37229ec83b8d98da6d996c588a1ddb1" dependencies = [ "async-trait", "bollard", @@ -4387,9 +4390,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.44.2" +version = "1.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" +checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" dependencies = [ "backtrace", "bytes", @@ -5388,15 +5391,6 @@ dependencies = [ "windows-link", ] -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets 0.48.5", -] - [[package]] name = "windows-sys" version = "0.52.0" @@ -5415,21 +5409,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 
0.48.5", -] - [[package]] name = "windows-targets" version = "0.52.6" @@ -5462,12 +5441,6 @@ dependencies = [ "windows_x86_64_msvc 0.53.0", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" @@ -5480,12 +5453,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - [[package]] name = "windows_aarch64_msvc" version = "0.52.6" @@ -5498,12 +5465,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - [[package]] name = "windows_i686_gnu" version = "0.52.6" @@ -5528,12 +5489,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - [[package]] name = "windows_i686_msvc" version = "0.52.6" @@ -5546,12 +5501,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" -[[package]] -name = 
"windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - [[package]] name = "windows_x86_64_gnu" version = "0.52.6" @@ -5564,12 +5513,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" @@ -5582,12 +5525,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" - [[package]] name = "windows_x86_64_msvc" version = "0.52.6" @@ -5602,9 +5539,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.8" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e27d6ad3dac991091e4d35de9ba2d2d00647c5d0fc26c5496dee55984ae111b" +checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" dependencies = [ "memchr", ] From 32a37d148ca1258d47a9bcf3fbbfb3a3d99a1ba8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 17:06:24 +0100 Subject: [PATCH 029/247] fix: [#1502] bug in total number of downloads for all torrents metric Relates to: https://github.com/torrust/torrust-tracker/pull/1497/commits/34c159a161b7c167730f6c139dd3cb608173d37a A couple of days ago, I made a change in [this 
commit](https://github.com/torrust/torrust-tracker/pull/1497/commits/34c159a161b7c167730f6c139dd3cb608173d37a). I changed the `Swarm::meets_retaining_policy` method from: ``` /// Returns true if the torrents meets the retention policy, meaning that /// it should be kept in the tracker. pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { if policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0 { return true; } if policy.remove_peerless_torrents && self.is_empty() { return false; } true } ``` To: ``` pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { !(policy.remove_peerless_torrents && self.is_empty()) } ``` I thought this code was not needed: ```rust if policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0 { return true; } ``` However, it's needed. One of the metrics returned by the tracker API is the **total number of downloads for all torrents**. ```json { "torrents": 320961, "seeders": 189885, "completed": 975119, <- this "leechers": 231044, ... } ``` That metric is always stored in memory but can optionally persist into the database. It's important to highlight that the metric represents: - The total number of downloads for **ALL** torrents ever, when the metric is persisted. - The total number of downloads for **ALL** torrents since the tracker started, when the metric is not persisted. It could be mixed up with another internal metric (not exposed via the API), which is the same counter but only for ONE swarm (one torrent). - The total number of downloads for **ONE** concrete torrent ever, when the metric is persisted. - The total number of downloads for **ONE** concrete torrent since the tracker started, when the metric is not persisted. The bug affects the first metric. The exposed via the API. The problem is that this feature conflicts with removing the peerless torrents. When removing the peerless torrents config option is enabled, the counter is lost unless it is persisted. 
Becuase the counter values are stored in the "Swarm" together with the list of peers. If statistics persistence is enabled, that's not a problem. When the torrent is removed from the tracker (from the swarms or swarm collection), the counter is initialised again if the torrent is added. In other words, if a new peer starts the swarm again, the number of downloads is loaded from the database. However, that works for the counter of each torrent (swarm) but not for the overall counter (the sum of downloads for all torrents). That metric is not stored anywhere. It's calculated on demand by iterating all the swarms and summing up the total for each torrent, giving the total amount of downloads for **ALL** torrents. When the torrent is removed, the downloads for that torrent don't count in the total. That is the reason we have to keep the torrent (swarm) in memory, even if it does not have any peer (and it should be removed according to the other config flag). The removed line: ```rust if policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0 { return true; } ``` does that. **When the stats persistence is disabled**, that's one way to store the value. Alternatively, we could add another cache for the data and never remove that value. The current solution has a problem: It can make the tracker consume a lot of memory because peerless torrents are not removed in practice (even if it's configured to be). **When the stats persistence is enabled,** we can simply return the value from the database. **NOTICE:** that the value is used in the scrape response, so it might be convenient to have a cache in memory anyway. - [x] Revert the change to fix the bug asap. - [x] Write a unit test. This behaviour was not covered by any test (or documented). - [ ] Add an in-memory cache value in `Swarms` type to store the total for all torrents, regardless of which are the current active swarms. 
--- packages/torrent-repository/src/swarm.rs | 122 +++++++++++++++++++---- 1 file changed, 102 insertions(+), 20 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 1a17a2fb6..e5b5d598c 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -191,10 +191,20 @@ impl Swarm { } /// Returns true if the swarm meets the retention policy, meaning that - /// it should be kept in the tracker. + /// it should be kept in the list of swarms. #[must_use] pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { - !(policy.remove_peerless_torrents && self.is_empty()) + !self.should_be_removed(policy) + } + + fn should_be_removed(&self, policy: &TrackerPolicy) -> bool { + // If the policy is to remove peerless torrents and the swarm is empty (no peers), + (policy.remove_peerless_torrents && self.is_empty()) + // but not when the policy is to persist torrent stats and the + // torrent has been downloaded at least once. + // (because the only way to store the counter is to keep the swarm in memory. 
+ // See https://github.com/torrust/torrust-tracker/issues/1502) + && !(policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0) } } @@ -205,7 +215,6 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::PeerId; - use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; @@ -376,28 +385,101 @@ mod tests { assert_eq!(swarm.len(), 1); } - #[test] - fn it_should_be_kept_when_empty_if_the_tracker_policy_is_not_to_remove_peerless_torrents() { - let empty_swarm = Swarm::default(); + mod for_retaining_policy { - let policy = TrackerPolicy { - remove_peerless_torrents: false, - ..Default::default() - }; + use torrust_tracker_configuration::TrackerPolicy; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; - assert!(empty_swarm.meets_retaining_policy(&policy)); - } + use crate::Swarm; - #[test] - fn it_should_be_removed_when_empty_if_the_tracker_policy_is_to_remove_peerless_torrents() { - let empty_swarm = Swarm::default(); + fn empty_swarm() -> Swarm { + Swarm::default() + } - let policy = TrackerPolicy { - remove_peerless_torrents: true, - ..Default::default() - }; + fn not_empty_swarm() -> Swarm { + let mut swarm = Swarm::default(); + swarm.upsert_peer(PeerBuilder::default().build().into(), &mut false); + swarm + } + + fn not_empty_swarm_with_downloads() -> Swarm { + let mut swarm = Swarm::default(); + + let mut peer = PeerBuilder::leecher().build(); + let mut downloads_increased = false; + + swarm.upsert_peer(peer.into(), &mut downloads_increased); + + peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; + + swarm.upsert_peer(peer.into(), &mut downloads_increased); + + assert!(swarm.metadata().downloads() > 0); + + swarm + } + + fn remove_peerless_torrents_policy() -> TrackerPolicy { + TrackerPolicy { + remove_peerless_torrents: true, + ..Default::default() + 
} + } + + fn don_not_remove_peerless_torrents_policy() -> TrackerPolicy { + TrackerPolicy { + remove_peerless_torrents: false, + ..Default::default() + } + } - assert!(!empty_swarm.meets_retaining_policy(&policy)); + mod when_removing_peerless_torrents_is_enabled { + + use torrust_tracker_configuration::TrackerPolicy; + + use crate::swarm::tests::for_retaining_policy::{ + empty_swarm, not_empty_swarm, not_empty_swarm_with_downloads, remove_peerless_torrents_policy, + }; + + #[test] + fn it_should_be_removed_if_the_swarm_is_empty() { + assert!(empty_swarm().should_be_removed(&remove_peerless_torrents_policy())); + } + + #[test] + fn it_should_not_be_removed_is_the_swarm_is_not_empty() { + assert!(!not_empty_swarm().should_be_removed(&remove_peerless_torrents_policy())); + } + + #[test] + fn it_should_not_be_removed_even_if_the_swarm_is_empty_if_we_need_to_track_stats_for_downloads_and_there_has_been_downloads( + ) { + let policy = TrackerPolicy { + remove_peerless_torrents: true, + persistent_torrent_completed_stat: true, + ..Default::default() + }; + + assert!(!not_empty_swarm_with_downloads().should_be_removed(&policy)); + } + } + + mod when_removing_peerless_torrents_is_disabled { + + use crate::swarm::tests::for_retaining_policy::{ + don_not_remove_peerless_torrents_policy, empty_swarm, not_empty_swarm, + }; + + #[test] + fn it_should_not_be_removed_even_if_the_swarm_is_empty() { + assert!(!empty_swarm().should_be_removed(&don_not_remove_peerless_torrents_policy())); + } + + #[test] + fn it_should_not_be_removed_is_the_swarm_is_not_empty() { + assert!(!not_empty_swarm().should_be_removed(&don_not_remove_peerless_torrents_policy())); + } + } } #[test] From 57b4822b74c4c8f81f81006dfbb45fb6bbde4e4f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 8 May 2025 11:01:43 +0100 Subject: [PATCH 030/247] refactor: remove debug print --- packages/udp-tracker-core/src/services/announce.rs | 2 -- 1 file changed, 2 deletions(-) diff --git 
a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index 499da2945..6ea237d84 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -119,8 +119,6 @@ impl AnnounceService { tracing::debug!(target = crate::UDP_TRACKER_LOG_TARGET, "Sending UdpAnnounce event: {event:?}"); - println!("Sending UdpAnnounce event: {event:?}"); - udp_stats_event_sender.send(event).await; } } From f11dfccc852605304a9923d10036a5a7d7502e28 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 8 May 2025 11:04:11 +0100 Subject: [PATCH 031/247] feat: [#1502] adding logs for debugging This adds more logs to the torrent's cleanup process. It would be helpful to find the bug described in the issue https://github.com/torrust/torrust-tracker/issues/1502. However, it will be useful afterwards. Sample output: ```output 2025-05-08T10:01:18.417631Z INFO torrust_tracker_lib::bootstrap::jobs::torrent_cleanup: Cleaning up torrents (executed every 60 secs) ... 2025-05-08T10:01:18.417661Z INFO bittorrent_tracker_core::torrent::manager: torrents=1 downloads=2 seeders=2 leechers=0 2025-05-08T10:01:18.417666Z INFO bittorrent_tracker_core::torrent::manager: peerless_torrents=0 peers=2 2025-05-08T10:01:18.417670Z INFO torrust_tracker_torrent_repository::swarms: Removing inactive peers since: 2025-05-08T10:00:48.417669546Z ... 2025-05-08T10:01:18.417676Z INFO torrust_tracker_torrent_repository::swarms: Inactive peers removed: 2 2025-05-08T10:01:18.417679Z INFO bittorrent_tracker_core::torrent::manager: torrents=1 downloads=2 seeders=0 leechers=0 2025-05-08T10:01:18.417682Z INFO bittorrent_tracker_core::torrent::manager: peerless_torrents=1 peers=0 2025-05-08T10:01:18.417685Z INFO torrust_tracker_torrent_repository::swarms: Removing peerless torrents ... 
2025-05-08T10:01:18.417688Z INFO torrust_tracker_torrent_repository::swarms: Peerless torrents removed: 0 2025-05-08T10:01:18.417690Z INFO bittorrent_tracker_core::torrent::manager: torrents=1 downloads=2 seeders=0 leechers=0 2025-05-08T10:01:18.417693Z INFO bittorrent_tracker_core::torrent::manager: peerless_torrents=1 peers=0 2025-05-08T10:01:18.417697Z INFO torrust_tracker_lib::bootstrap::jobs::torrent_cleanup: Cleaned up torrents in: 0 ms ``` --- Cargo.lock | 1 + packages/torrent-repository/Cargo.toml | 1 + packages/torrent-repository/src/swarm.rs | 13 +- packages/torrent-repository/src/swarms.rs | 124 ++++++++++++++---- packages/tracker-core/src/torrent/manager.rs | 35 +++++ .../src/torrent/repository/in_memory.rs | 22 ++++ .../config/tracker.development.sqlite3.toml | 6 + src/bootstrap/jobs/torrent_cleanup.rs | 5 +- 8 files changed, 181 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 80f98db36..04ce8ad8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4859,6 +4859,7 @@ dependencies = [ "torrust-tracker-configuration", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "tracing", ] [[package]] diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 2cc02a720..3396cd961 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -24,6 +24,7 @@ tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +tracing = "0" [dev-dependencies] async-std = { version = "1", features = ["attributes", "tokio1"] } diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index e5b5d598c..4437ca410 100644 --- a/packages/torrent-repository/src/swarm.rs +++ 
b/packages/torrent-repository/src/swarm.rs @@ -101,7 +101,9 @@ impl Swarm { } } - pub fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) { + pub fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) -> u64 { + let mut inactive_peers_removed = 0; + self.peers.retain(|_, peer| { let is_active = peer::ReadInfo::get_updated(peer) > current_cutoff; @@ -112,10 +114,14 @@ impl Swarm { } else { self.metadata.incomplete -= 1; } + + inactive_peers_removed += 1; } is_active }); + + inactive_peers_removed } #[must_use] @@ -190,6 +196,11 @@ impl Swarm { self.peers.is_empty() } + #[must_use] + pub fn is_peerless(&self) -> bool { + self.is_empty() + } + /// Returns true if the swarm meets the retention policy, meaning that /// it should be kept in the list of swarms. #[must_use] diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 828e8c030..0746e19a8 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -2,6 +2,7 @@ use std::sync::{Arc, Mutex}; use bittorrent_primitives::info_hash::InfoHash; use crossbeam_skiplist::SkipMap; +use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; @@ -76,24 +77,6 @@ impl Swarms { self.swarms.remove(key).map(|entry| entry.value().clone()) } - /// Removes inactive peers from all torrent entries. - /// - /// A peer is considered inactive if its last update timestamp is older than - /// the provided cutoff time. - /// - /// # Errors - /// - /// This function returns an error if it fails to acquire the lock for any - /// swarm handle. 
- pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result<(), Error> { - for swarm_handle in &self.swarms { - let mut swarm = swarm_handle.value().lock()?; - swarm.remove_inactive(current_cutoff); - } - - Ok(()) - } - /// Retrieves a tracked torrent handle by its infohash. /// /// # Returns @@ -225,6 +208,34 @@ impl Swarms { } } + /// Removes inactive peers from all torrent entries. + /// + /// A peer is considered inactive if its last update timestamp is older than + /// the provided cutoff time. + /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. + pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result { + tracing::info!( + "Removing inactive peers since: {:?} ...", + convert_from_timestamp_to_datetime_utc(current_cutoff) + ); + + let mut inactive_peers_removed = 0; + + for swarm_handle in &self.swarms { + let mut swarm = swarm_handle.value().lock()?; + let removed = swarm.remove_inactive(current_cutoff); + inactive_peers_removed += removed; + } + + tracing::info!("Inactive peers removed: {inactive_peers_removed}"); + + Ok(inactive_peers_removed) + } + /// Removes torrent entries that have no active peers. /// /// Depending on the tracker policy, torrents without any peers may be @@ -234,7 +245,11 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. 
- pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> Result<(), Error> { + pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> Result { + tracing::info!("Removing peerless torrents ..."); + + let mut peerless_torrents_removed = 0; + for swarm_handle in &self.swarms { let swarm = swarm_handle.value().lock()?; @@ -243,9 +258,13 @@ impl Swarms { } swarm_handle.remove(); + + peerless_torrents_removed += 1; } - Ok(()) + tracing::info!("Peerless torrents removed: {peerless_torrents_removed}"); + + Ok(peerless_torrents_removed) } /// Imports persistent torrent data into the in-memory repository. @@ -253,7 +272,11 @@ impl Swarms { /// This method takes a set of persisted torrent entries (e.g., from a /// database) and imports them into the in-memory repository for immediate /// access. - pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> u64 { + tracing::info!("Importing persisted info about torrents ..."); + + let mut torrents_imported = 0; + for (info_hash, completed) in persistent_torrents { if self.swarms.contains_key(info_hash) { continue; @@ -264,7 +287,13 @@ impl Swarms { // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. self.swarms.get_or_insert(*info_hash, entry); + + torrents_imported += 1; } + + tracing::info!("Imported torrents: {torrents_imported}"); + + torrents_imported } /// Calculates and returns overall torrent metrics. 
@@ -284,9 +313,11 @@ impl Swarms { pub fn get_aggregate_swarm_metadata(&self) -> Result { let mut metrics = AggregateSwarmMetadata::default(); - for entry in &self.swarms { - let swarm = entry.value().lock()?; + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock()?; + let stats = swarm.metadata(); + metrics.total_complete += u64::from(stats.complete); metrics.total_downloaded += u64::from(stats.downloaded); metrics.total_incomplete += u64::from(stats.incomplete); @@ -296,6 +327,53 @@ impl Swarms { Ok(metrics) } + /// Counts the number of torrents that are peerless (i.e., have no active + /// peers). + /// + /// # Returns + /// + /// A `usize` representing the number of peerless torrents. + /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. + pub fn count_peerless_torrents(&self) -> Result { + let mut peerless_torrents = 0; + + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock()?; + + if swarm.is_peerless() { + peerless_torrents += 1; + } + } + + Ok(peerless_torrents) + } + + /// Counts the total number of peers across all torrents. + /// + /// # Returns + /// + /// A `usize` representing the total number of peers. + /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. 
+ pub fn count_peers(&self) -> Result { + let mut peers = 0; + + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock()?; + + peers += swarm.len(); + } + + Ok(peers) + } + #[must_use] pub fn len(&self) -> usize { self.swarms.len() diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 5c8352f11..5afbcecf2 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -92,16 +92,51 @@ impl TorrentsManager { /// (`remove_peerless_torrents` is set), it removes entire torrent /// entries that have no active peers. pub fn cleanup_torrents(&self) { + self.log_aggregate_swarm_metadata(); + + self.remove_inactive_peers(); + + self.log_aggregate_swarm_metadata(); + + self.remove_peerless_torrents(); + + self.log_aggregate_swarm_metadata(); + } + + fn remove_inactive_peers(&self) { let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) .unwrap_or_default(); self.in_memory_torrent_repository.remove_inactive_peers(current_cutoff); + } + fn remove_peerless_torrents(&self) { if self.config.tracker_policy.remove_peerless_torrents { self.in_memory_torrent_repository .remove_peerless_torrents(&self.config.tracker_policy); } } + + fn log_aggregate_swarm_metadata(&self) { + // Pre-calculated data + let aggregate_swarm_metadata = self.in_memory_torrent_repository.get_aggregate_swarm_metadata(); + + tracing::info!(name: "pre_calculated_aggregate_swarm_metadata", + torrents = aggregate_swarm_metadata.total_torrents, + downloads = aggregate_swarm_metadata.total_downloaded, + seeders = aggregate_swarm_metadata.total_complete, + leechers = aggregate_swarm_metadata.total_incomplete, + ); + + // Hot data (iterating over data structures) + let peerless_torrents = self.in_memory_torrent_repository.count_peerless_torrents(); + let peers = self.in_memory_torrent_repository.count_peers(); + + 
tracing::info!(name: "hot_aggregate_swarm_metadata", + peerless_torrents = peerless_torrents, + peers = peers, + ); + } } #[cfg(test)] diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 38593bf3c..ffb53edad 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -241,6 +241,28 @@ impl InMemoryTorrentRepository { .expect("Failed to get aggregate swarm metadata") } + /// Counts the number of peerless torrents in the repository. + /// + /// # Panics + /// + /// This function panics if the underling swarms return an error. + #[must_use] + pub fn count_peerless_torrents(&self) -> usize { + self.swarms + .count_peerless_torrents() + .expect("Failed to count peerless torrents") + } + + /// Counts the number of peers in the repository. + /// + /// # Panics + /// + /// This function panics if the underling swarms return an error. + #[must_use] + pub fn count_peers(&self) -> usize { + self.swarms.count_peers().expect("Failed to count peers") + } + /// Imports persistent torrent data into the in-memory repository. 
/// /// This method takes a set of persisted torrent entries (e.g., from a database) diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 333c6d66c..8d03f2300 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -7,9 +7,15 @@ schema_version = "2.0.0" threshold = "info" [core] +#inactive_peer_cleanup_interval = 60 listed = false private = false +#[core.tracker_policy] +#max_peer_timeout = 30 +#persistent_torrent_completed_stat = true +#remove_peerless_torrents = true + [[udp_trackers]] bind_address = "0.0.0.0:6868" tracker_usage_statistics = true diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 54b1eeef7..0107b5370 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -28,6 +28,7 @@ use tracing::instrument; pub fn start_job(config: &Core, torrents_manager: &Arc) -> JoinHandle<()> { let weak_torrents_manager = std::sync::Arc::downgrade(torrents_manager); let interval = config.inactive_peer_cleanup_interval; + let interval_in_secs = interval; tokio::spawn(async move { let interval = std::time::Duration::from_secs(interval); @@ -43,9 +44,9 @@ pub fn start_job(config: &Core, torrents_manager: &Arc) -> Join _ = interval.tick() => { if let Some(torrents_manager) = weak_torrents_manager.upgrade() { let start_time = Utc::now().time(); - tracing::info!("Cleaning up torrents.."); + tracing::info!("Cleaning up torrents (executed every {} secs) ...", interval_in_secs); torrents_manager.cleanup_torrents(); - tracing::info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()); + tracing::info!("Cleaned up torrents in: {} ms", (Utc::now().time() - start_time).num_milliseconds()); } else { break; } From 46c7eae0fd53cbfc628ed85676eec8cee681f283 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 8 May 2025 
16:42:17 +0100 Subject: [PATCH 032/247] dev: enable persistence for downdloads in dev config There are no performance problems in dev env, so it's better to enable as many features as possible to tests them while developing. --- share/default/config/tracker.development.sqlite3.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 8d03f2300..488743eb9 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -11,9 +11,9 @@ threshold = "info" listed = false private = false -#[core.tracker_policy] +[core.tracker_policy] #max_peer_timeout = 30 -#persistent_torrent_completed_stat = true +persistent_torrent_completed_stat = true #remove_peerless_torrents = true [[udp_trackers]] From ced2788a2854203be9653169d23e65415d7b4972 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 8 May 2025 16:54:06 +0100 Subject: [PATCH 033/247] fix: [#1502] import torrents' download counters from DB when the tracker starts. In the current implementation all torrents that have benn downloaded at least once have to be in memory initializting the counter. Otherwise, the global counter for downloads for all torrents only includes downloads for the torrents being currently tracker by the tracker. --- packages/tracker-core/src/torrent/manager.rs | 5 ++--- src/app.rs | 11 +++++++++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 5afbcecf2..aaac811f2 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -60,7 +60,7 @@ impl TorrentsManager { } } - /// Loads torrents from the persistent database into the in-memory repository. + /// Loads torrents from the database into the in-memory repository. 
/// /// This function retrieves the list of persistent torrent entries (which /// include only the aggregate metrics, not the detailed peer lists) from @@ -70,8 +70,7 @@ impl TorrentsManager { /// /// Returns a `databases::error::Error` if unable to load the persistent /// torrent data. - #[allow(dead_code)] - pub(crate) fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { + pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.db_torrent_repository.load_all()?; self.in_memory_torrent_repository.import_persistent(&persistent_torrents); diff --git a/src/app.rs b/src/app.rs index 8f5c6ca4c..7bfa5296a 100644 --- a/src/app.rs +++ b/src/app.rs @@ -61,6 +61,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> async fn load_data_from_database(config: &Configuration, app_container: &Arc) { load_peer_keys(config, app_container).await; load_whitelisted_torrents(config, app_container).await; + load_torrents_from_database(config, app_container); } async fn start_jobs(config: &Configuration, app_container: &Arc) -> JobManager { @@ -109,6 +110,16 @@ async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc) { + if config.core.tracker_policy.persistent_torrent_completed_stat { + app_container + .tracker_core_container + .torrents_manager + .load_torrents_from_database() + .expect("Could not load torrents from database."); + } +} + fn start_http_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { let opt_handle = jobs::http_tracker_core::start_event_listener(config, app_container); From 632185bf2237affb044f5c5f32c458061b460f40 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 8 May 2025 17:02:04 +0100 Subject: [PATCH 034/247] refactor: tracing spwams to use structure formats When possible prefer this with "variable=value" format: ``` imported_torrents=2 ``` To this: ``` Imported torrents: 2 ``` It's easier to 
parse and less likely to be changed. --- packages/torrent-repository/src/swarms.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 0746e19a8..a140663c9 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -231,7 +231,7 @@ impl Swarms { inactive_peers_removed += removed; } - tracing::info!("Inactive peers removed: {inactive_peers_removed}"); + tracing::info!(inactive_peers_removed = inactive_peers_removed); Ok(inactive_peers_removed) } @@ -262,7 +262,7 @@ impl Swarms { peerless_torrents_removed += 1; } - tracing::info!("Peerless torrents removed: {peerless_torrents_removed}"); + tracing::info!(peerless_torrents_removed = peerless_torrents_removed); Ok(peerless_torrents_removed) } @@ -291,7 +291,7 @@ impl Swarms { torrents_imported += 1; } - tracing::info!("Imported torrents: {torrents_imported}"); + tracing::info!(imported_torrents = torrents_imported); torrents_imported } From cb487f36588c681988f5f4c75eacf87a8539dc1c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 9 May 2025 08:35:40 +0100 Subject: [PATCH 035/247] fix: [#1510] disable torrent stats importation at start When the tracker starts, if stats persistence is enabled, all torrents that have ever been downloaded are loaded into memory (`Swarms` type) with their download counter. That's the current way to count all downloads and expose that metric. However, it does not work with **millions of torrents** (like in the tracker demo) becuase: - It's too slow. - It consumes too much memory (all torrents that have ever been downloaded have to be loaded). A new solution is needed to keep that metric, but in the meantime, this disables that feature, producing these effects: - Non-accurate value for downloads when the tracker is restarted. 
- Increasing indefinitely the number of torrents in memory even if the "remove peerless torrents" policy is enabled (becuase this feature overrides that policy and peerless torrents are kept in memory). --- src/app.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/app.rs b/src/app.rs index 7bfa5296a..93035ee99 100644 --- a/src/app.rs +++ b/src/app.rs @@ -61,7 +61,12 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> async fn load_data_from_database(config: &Configuration, app_container: &Arc) { load_peer_keys(config, app_container).await; load_whitelisted_torrents(config, app_container).await; - load_torrents_from_database(config, app_container); + // todo: disabled because of performance issues. + // The tracker demo has a lot of torrents and loading them all at once is not + // efficient. We also load them on demand but the total number of downloads + // metric is not accurate because not all torrents are loaded. + // See: https://github.com/torrust/torrust-tracker/issues/1510 + //load_torrents_from_database(config, app_container); } async fn start_jobs(config: &Configuration, app_container: &Arc) -> JobManager { @@ -110,6 +115,7 @@ async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc) { if config.core.tracker_policy.persistent_torrent_completed_stat { app_container From 243c25484ce796db48d3532eadd6b76c7fd4f3eb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 13 May 2025 10:34:36 +0100 Subject: [PATCH 036/247] feat: allow incrementing/decrementing gauge metrics --- packages/metrics/src/gauge.rs | 22 +++++++ packages/metrics/src/metric/mod.rs | 8 +++ packages/metrics/src/metric_collection.rs | 62 ++++++++++++++++++++ packages/metrics/src/sample.rs | 38 +++++++++++- packages/metrics/src/sample_collection.rs | 70 +++++++++++++++++++---- 5 files changed, 187 insertions(+), 13 deletions(-) diff --git a/packages/metrics/src/gauge.rs b/packages/metrics/src/gauge.rs index 61ff3024c..3f6089955 
100644 --- a/packages/metrics/src/gauge.rs +++ b/packages/metrics/src/gauge.rs @@ -20,6 +20,14 @@ impl Gauge { pub fn set(&mut self, value: f64) { self.0 = value; } + + pub fn increment(&mut self, value: f64) { + self.0 += value; + } + + pub fn decrement(&mut self, value: f64) { + self.0 -= value; + } } impl From for Gauge { @@ -72,6 +80,20 @@ mod tests { assert_relative_eq!(gauge.value(), 1.0); } + #[test] + fn it_could_be_incremented() { + let mut gauge = Gauge::new(0.0); + gauge.increment(1.0); + assert_relative_eq!(gauge.value(), 1.0); + } + + #[test] + fn it_could_be_decremented() { + let mut gauge = Gauge::new(1.0); + gauge.decrement(1.0); + assert_relative_eq!(gauge.value(), 0.0); + } + #[test] fn it_serializes_to_prometheus() { let counter = Gauge::new(42.0); diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index ecce90f18..05779f09f 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -61,6 +61,14 @@ impl Metric { pub fn set(&mut self, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { self.sample_collection.set(label_set, value, time); } + + pub fn increment(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + self.sample_collection.increment(label_set, time); + } + + pub fn decrement(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + self.sample_collection.decrement(label_set, time); + } } impl PrometheusSerializable for Metric { diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 9e89c3c4b..438f3b03a 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -136,6 +136,38 @@ impl MetricCollection { Ok(()) } + /// # Errors + /// + /// Return an error if a metrics of a different type with the same name + /// already exists. 
+ pub fn increase_gauge(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) -> Result<(), Error> { + if self.counters.metrics.contains_key(name) { + return Err(Error::MetricNameCollisionAdding { + metric_name: name.clone(), + }); + } + + self.gauges.increment(name, label_set, time); + + Ok(()) + } + + /// # Errors + /// + /// Return an error if a metrics of a different type with the same name + /// already exists. + pub fn decrease_gauge(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) -> Result<(), Error> { + if self.counters.metrics.contains_key(name) { + return Err(Error::MetricNameCollisionAdding { + metric_name: name.clone(), + }); + } + + self.gauges.decrement(name, label_set, time); + + Ok(()) + } + pub fn ensure_gauge_exists(&mut self, name: &MetricName) { self.gauges.ensure_metric_exists(name); } @@ -353,6 +385,36 @@ impl MetricKindCollection { metric.set(label_set, value, time); } + /// Increments the gauge for the given metric name and labels. + /// + /// If the metric name does not exist, it will be created. + /// + /// # Panics + /// + /// Panics if the metric does not exist and it could not be created. + pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + self.ensure_metric_exists(name); + + let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); + + metric.increment(label_set, time); + } + + /// Decrements the gauge for the given metric name and labels. + /// + /// If the metric name does not exist, it will be created. + /// + /// # Panics + /// + /// Panics if the metric does not exist and it could not be created. 
+ pub fn decrement(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + self.ensure_metric_exists(name); + + let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); + + metric.decrement(label_set, time); + } + #[must_use] pub fn get_value(&self, name: &MetricName, label_set: &LabelSet) -> Option { self.metrics diff --git a/packages/metrics/src/sample.rs b/packages/metrics/src/sample.rs index 5567dffec..4621c9906 100644 --- a/packages/metrics/src/sample.rs +++ b/packages/metrics/src/sample.rs @@ -64,6 +64,14 @@ impl Sample { pub fn set(&mut self, value: f64, time: DurationSinceUnixEpoch) { self.measurement.set(value, time); } + + pub fn increment(&mut self, time: DurationSinceUnixEpoch) { + self.measurement.increment(time); + } + + pub fn decrement(&mut self, time: DurationSinceUnixEpoch) { + self.measurement.decrement(time); + } } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -121,6 +129,16 @@ impl Measurement { self.value.set(value); self.set_recorded_at(time); } + + pub fn increment(&mut self, time: DurationSinceUnixEpoch) { + self.value.increment(1.0); + self.set_recorded_at(time); + } + + pub fn decrement(&mut self, time: DurationSinceUnixEpoch) { + self.value.decrement(1.0); + self.set_recorded_at(time); + } } /// Serializes the `recorded_at` field as a string in ISO 8601 format (RFC 3339). 
@@ -273,7 +291,7 @@ mod tests { } #[test] - fn it_should_allow_incrementing_the_counter() { + fn it_should_allow_setting_a_value() { let mut sample = Sample::new(Gauge::default(), DurationSinceUnixEpoch::default(), LabelSet::default()); sample.set(1.0, updated_at_time()); @@ -281,6 +299,24 @@ mod tests { assert_eq!(sample.value(), &Gauge::new(1.0)); } + #[test] + fn it_should_allow_incrementing_the_value() { + let mut sample = Sample::new(Gauge::new(0.0), DurationSinceUnixEpoch::default(), LabelSet::default()); + + sample.increment(updated_at_time()); + + assert_eq!(sample.value(), &Gauge::new(1.0)); + } + + #[test] + fn it_should_allow_decrementing_the_value() { + let mut sample = Sample::new(Gauge::new(1.0), DurationSinceUnixEpoch::default(), LabelSet::default()); + + sample.decrement(updated_at_time()); + + assert_eq!(sample.value(), &Gauge::new(0.0)); + } + #[test] fn it_should_record_the_latest_update_time_when_the_counter_is_incremented() { let mut sample = Sample::new(Gauge::default(), DurationSinceUnixEpoch::default(), LabelSet::default()); diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs index 49c839673..ea6b4d4af 100644 --- a/packages/metrics/src/sample_collection.rs +++ b/packages/metrics/src/sample_collection.rs @@ -90,6 +90,24 @@ impl SampleCollection { sample.set(value, time); } + + pub fn increment(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + let sample = self + .samples + .entry(label_set.clone()) + .or_insert_with(|| Measurement::new(Gauge::default(), time)); + + sample.increment(time); + } + + pub fn decrement(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + let sample = self + .samples + .entry(label_set.clone()) + .or_insert_with(|| Measurement::new(Gauge::default(), time)); + + sample.decrement(time); + } } impl Serialize for SampleCollection { @@ -278,7 +296,7 @@ mod tests { #[test] fn it_should_increment_the_counter_for_a_preexisting_label_set() { let 
label_set = LabelSet::default(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); // Initialize the sample collection.increment(&label_set, sample_update_time()); @@ -296,7 +314,7 @@ mod tests { #[test] fn it_should_allow_increment_the_counter_for_a_non_existent_label_set() { let label_set = LabelSet::default(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); // Increment a non-existent label collection.increment(&label_set, sample_update_time()); @@ -312,7 +330,7 @@ mod tests { let label_set = LabelSet::default(); let initial_time = sample_update_time(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); collection.increment(&label_set, initial_time); // Increment with a new time @@ -330,7 +348,7 @@ mod tests { let label2 = LabelSet::from([("name", "value2")]); let now = sample_update_time(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); collection.increment(&label1, now); collection.increment(&label2, now); @@ -351,9 +369,9 @@ mod tests { use crate::gauge::Gauge; #[test] - fn it_should_increment_the_gauge_for_a_preexisting_label_set() { + fn it_should_allow_setting_the_gauge_for_a_preexisting_label_set() { let label_set = LabelSet::default(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); // Initialize the sample collection.set(&label_set, 1.0, sample_update_time()); @@ -369,9 +387,9 @@ mod tests { } #[test] - fn it_should_allow_increment_the_gauge_for_a_non_existent_label_set() { + fn it_should_allow_setting_the_gauge_for_a_non_existent_label_set() { let label_set = LabelSet::default(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); // Set a non-existent label collection.set(&label_set, 1.0, sample_update_time()); @@ 
-383,11 +401,11 @@ mod tests { } #[test] - fn it_should_update_the_latest_update_time_when_incremented() { + fn it_should_update_the_latest_update_time_when_setting() { let label_set = LabelSet::default(); let initial_time = sample_update_time(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); collection.set(&label_set, 1.0, initial_time); // Set with a new time @@ -400,12 +418,12 @@ mod tests { } #[test] - fn it_should_increment_the_gauge_for_multiple_labels() { + fn it_should_allow_setting_the_gauge_for_multiple_labels() { let label1 = LabelSet::from([("name", "value1")]); let label2 = LabelSet::from([("name", "value2")]); let now = sample_update_time(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); collection.set(&label1, 1.0, now); collection.set(&label2, 2.0, now); @@ -414,5 +432,33 @@ mod tests { assert_eq!(collection.get(&label2).unwrap().value(), &Gauge::new(2.0)); assert_eq!(collection.len(), 2); } + + #[test] + fn it_should_allow_incrementing_the_gauge() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::::default(); + + // Initialize the sample + collection.set(&label_set, 1.0, sample_update_time()); + + // Increment + collection.increment(&label_set, sample_update_time()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Gauge::new(2.0)); + } + + #[test] + fn it_should_allow_decrementing_the_gauge() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::::default(); + + // Initialize the sample + collection.set(&label_set, 1.0, sample_update_time()); + + // Increment + collection.decrement(&label_set, sample_update_time()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Gauge::new(0.0)); + } } } From 2522ad4ccff7bbc0010581d10bdc2f32abc90555 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 9 May 2025 16:40:14 
+0100 Subject: [PATCH 037/247] feat: [#1358] basic scaffolding for events in torrent-repository pkg TODO: - Run the event listener for the torrent-repository package when the tracker starts. - Inject enven sender in `Swarms` and `Swarm` type to send events. - Trigger events and process them to update the metrics. - Expose the metrics via the `metrics` API endpoint. - ... --- Cargo.lock | 3 + packages/torrent-repository/Cargo.toml | 3 + packages/torrent-repository/src/event.rs | 44 ++++++++++++++ packages/torrent-repository/src/lib.rs | 4 ++ .../src/statistics/event/handler.rs | 21 +++++++ .../src/statistics/event/listener.rs | 57 +++++++++++++++++++ .../src/statistics/event/mod.rs | 2 + .../src/statistics/metrics.rs | 39 +++++++++++++ .../torrent-repository/src/statistics/mod.rs | 34 +++++++++++ .../src/statistics/repository.rs | 54 ++++++++++++++++++ 10 files changed, 261 insertions(+) create mode 100644 packages/torrent-repository/src/event.rs create mode 100644 packages/torrent-repository/src/statistics/event/handler.rs create mode 100644 packages/torrent-repository/src/statistics/event/listener.rs create mode 100644 packages/torrent-repository/src/statistics/event/mod.rs create mode 100644 packages/torrent-repository/src/statistics/metrics.rs create mode 100644 packages/torrent-repository/src/statistics/mod.rs create mode 100644 packages/torrent-repository/src/statistics/repository.rs diff --git a/Cargo.lock b/Cargo.lock index 04ce8ad8c..90a6354bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4853,10 +4853,13 @@ dependencies = [ "crossbeam-skiplist", "rand 0.9.1", "rstest", + "serde", "thiserror 2.0.12", "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-events", + "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", "tracing", diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 3396cd961..77192c7cf 100644 --- 
a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -19,10 +19,13 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" crossbeam-skiplist = "0" +serde = "1.0.219" thiserror = "2.0.12" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } tracing = "0" diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs new file mode 100644 index 000000000..57fe7bc4b --- /dev/null +++ b/packages/torrent-repository/src/event.rs @@ -0,0 +1,44 @@ +use std::net::SocketAddr; + +use aquatic_udp_protocol::PeerId; +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::PeerAnnouncement; + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Event { + TorrentAdded { + info_hash: InfoHash, + announcement: PeerAnnouncement, + }, + TorrentRemoved { + info_hash: InfoHash, + }, + PeerAdded { + announcement: PeerAnnouncement, + }, + PeerRemoved { + socket_addr: SocketAddr, + peer_id: PeerId, + }, +} + +pub mod sender { + use std::sync::Arc; + + use super::Event; + + pub type Sender = Option>>; + pub type Broadcaster = torrust_tracker_events::broadcaster::Broadcaster; +} + +pub mod receiver { + use super::Event; + + pub type Receiver = Box>; +} + +pub mod bus { + use crate::event::Event; + + pub type EventBus = torrust_tracker_events::bus::EventBus; +} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index a4e7d9c5d..0d455177c 100644 --- a/packages/torrent-repository/src/lib.rs +++ 
b/packages/torrent-repository/src/lib.rs @@ -1,3 +1,5 @@ +pub mod event; +pub mod statistics; pub mod swarm; pub mod swarms; @@ -19,6 +21,8 @@ pub(crate) type CurrentClock = clock::Working; #[allow(dead_code)] pub(crate) type CurrentClock = clock::Stopped; +pub const TORRENT_REPOSITORY_LOG_TARGET: &str = "TORRENT_REPOSITORY"; + pub trait LockTrackedTorrent { fn lock_or_panic(&self) -> MutexGuard<'_, Swarm>; } diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs new file mode 100644 index 000000000..d68df0b1b --- /dev/null +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -0,0 +1,21 @@ +use std::sync::Arc; + +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::Event; +use crate::statistics::repository::Repository; + +/// # Panics +/// +/// This function panics if the client IP address is not the same as the IP +/// version of the event. +pub async fn handle_event(_event: Event, stats_repository: &Arc, _now: DurationSinceUnixEpoch) { + /*match event { + Event::TorrentAdded { .. } => {} + Event::TorrentRemoved { .. } => {} + Event::PeerAdded { .. } => {} + Event::PeerRemoved { .. 
} => {} + }*/ + + tracing::debug!("metrics: {:?}", stats_repository.get_metrics().await); +} diff --git a/packages/torrent-repository/src/statistics/event/listener.rs b/packages/torrent-repository/src/statistics/event/listener.rs new file mode 100644 index 000000000..f3b534332 --- /dev/null +++ b/packages/torrent-repository/src/statistics/event/listener.rs @@ -0,0 +1,57 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; + +use super::handler::handle_event; +use crate::event::receiver::Receiver; +use crate::statistics::repository::Repository; +use crate::{CurrentClock, TORRENT_REPOSITORY_LOG_TARGET}; + +#[must_use] +pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { + let stats_repository = repository.clone(); + + tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Starting torrent repository event listener"); + + tokio::spawn(async move { + dispatch_events(receiver, stats_repository).await; + + tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository listener finished"); + }) +} + +async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { + let shutdown_signal = tokio::signal::ctrl_c(); + + tokio::pin!(shutdown_signal); + + loop { + tokio::select! 
{ + biased; + + _ = &mut shutdown_signal => { + tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Received Ctrl+C, shutting down torrent repository event listener."); + break; + } + + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository event receiver closed."); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository event receiver lagged by {} events.", n); + } + } + } + } + } + } + } +} diff --git a/packages/torrent-repository/src/statistics/event/mod.rs b/packages/torrent-repository/src/statistics/event/mod.rs new file mode 100644 index 000000000..dae683398 --- /dev/null +++ b/packages/torrent-repository/src/statistics/event/mod.rs @@ -0,0 +1,2 @@ +pub mod handler; +pub mod listener; diff --git a/packages/torrent-repository/src/statistics/metrics.rs b/packages/torrent-repository/src/statistics/metrics.rs new file mode 100644 index 000000000..6ee275e63 --- /dev/null +++ b/packages/torrent-repository/src/statistics/metrics.rs @@ -0,0 +1,39 @@ +use serde::Serialize; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +/// Metrics collected by the torrent repository. +#[derive(Debug, Clone, PartialEq, Default, Serialize)] +pub struct Metrics { + /// A collection of metrics. + pub metric_collection: MetricCollection, +} + +impl Metrics { + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. 
+ pub fn increase_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increase_counter(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn set_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_gauge(metric_name, labels, value, now) + } +} diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs new file mode 100644 index 000000000..b0dce479f --- /dev/null +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -0,0 +1,34 @@ +pub mod event; +pub mod metrics; +pub mod repository; + +use metrics::Metrics; +use torrust_tracker_metrics::metric::description::MetricDescription; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_metrics::unit::Unit; + +const TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_runtime_torrents_downloads_total"; +const TORRENT_REPOSITORY_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_persistent_torrents_downloads_total"; + +#[must_use] +pub fn describe_metrics() -> Metrics { + let mut metrics = Metrics::default(); + + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new( + "The total number of torrent downloads since the tracker process started.", + )), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new( + "The total number of torrent downloads since persistent statistics were enabled the first time.", + )), + ); + + metrics +} diff --git 
a/packages/torrent-repository/src/statistics/repository.rs b/packages/torrent-repository/src/statistics/repository.rs new file mode 100644 index 000000000..9fdff7008 --- /dev/null +++ b/packages/torrent-repository/src/statistics/repository.rs @@ -0,0 +1,54 @@ +use std::sync::Arc; + +use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::Error; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::describe_metrics; +use super::metrics::Metrics; + +/// A repository for the torrent repository metrics. +#[derive(Clone)] +pub struct Repository { + pub stats: Arc>, +} + +impl Default for Repository { + fn default() -> Self { + Self::new() + } +} + +impl Repository { + #[must_use] + pub fn new() -> Self { + let stats = Arc::new(RwLock::new(describe_metrics())); + + Self { stats } + } + + pub async fn get_metrics(&self) -> RwLockReadGuard<'_, Metrics> { + self.stats.read().await + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increase the counter. + pub async fn increase_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.increase_counter(metric_name, labels, now); + + drop(stats_lock); + + result + } +} From f986bdaf2396dc7a921a86ed168d3bd684c64931 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 9 May 2025 17:18:55 +0100 Subject: [PATCH 038/247] feat: [#1358] add the and run the event listener when the tracker starts This creates independent services that are not used yet in the tracker-core, meaning the `Swarms` object created in the `TorrentRepositoryContainer` will not store any torrent yet. The tracker core is still creating its own fresh instance. 
--- Cargo.lock | 1 + Cargo.toml | 1 + packages/torrent-repository/src/container.rs | 37 ++++++++++++++++++++ packages/torrent-repository/src/lib.rs | 1 + src/app.rs | 14 ++++++++ src/bootstrap/jobs/mod.rs | 1 + src/bootstrap/jobs/torrent_repository.rs | 20 +++++++++++ src/container.rs | 11 ++++++ 8 files changed, 86 insertions(+) create mode 100644 packages/torrent-repository/src/container.rs create mode 100644 src/bootstrap/jobs/torrent_repository.rs diff --git a/Cargo.lock b/Cargo.lock index 90a6354bc..5f024dcc2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4713,6 +4713,7 @@ dependencies = [ "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", "tracing", "tracing-subscriber", diff --git a/Cargo.toml b/Cargo.toml index a15ff78df..219701d03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,6 +55,7 @@ torrust-rest-tracker-api-core = { version = "3.0.0-develop", path = "packages/re torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/torrent-repository" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "packages/udp-tracker-server" } tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } diff --git a/packages/torrent-repository/src/container.rs b/packages/torrent-repository/src/container.rs new file mode 100644 index 000000000..7522c7956 --- /dev/null +++ b/packages/torrent-repository/src/container.rs @@ -0,0 +1,37 @@ +use std::sync::Arc; + +use crate::event::bus::EventBus; +use crate::event::sender::Broadcaster; +use crate::event::{self}; +use crate::statistics::repository::Repository; +use crate::{statistics, Swarms}; + +pub struct 
TorrentRepositoryContainer { + pub swarms: Arc, + pub event_bus: Arc, + pub stats_event_sender: event::sender::Sender, + pub stats_repository: Arc, +} + +impl TorrentRepositoryContainer { + #[must_use] + pub fn initialize() -> Self { + let swarms = Arc::new(Swarms::default()); + + // Torrent repository stats + let broadcaster = Broadcaster::default(); + let stats_repository = Arc::new(Repository::new()); + + // todo: add a config option to enable/disable stats for this package + let event_bus = Arc::new(EventBus::new(true, broadcaster.clone())); + + let stats_event_sender = event_bus.sender(); + + Self { + swarms, + event_bus, + stats_event_sender, + stats_repository, + } + } +} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 0d455177c..c6790c4db 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,3 +1,4 @@ +pub mod container; pub mod event; pub mod statistics; pub mod swarm; diff --git a/src/app.rs b/src/app.rs index 93035ee99..ca8b7a5c3 100644 --- a/src/app.rs +++ b/src/app.rs @@ -72,9 +72,11 @@ async fn load_data_from_database(config: &Configuration, app_container: &Arc) -> JobManager { let mut job_manager = JobManager::new(); + start_torrent_repository_event_listener(config, app_container, &mut job_manager); start_http_core_event_listener(config, app_container, &mut job_manager); start_udp_core_event_listener(config, app_container, &mut job_manager); start_udp_server_event_listener(config, app_container, &mut job_manager); + start_the_udp_instances(config, app_container, &mut job_manager).await; start_the_http_instances(config, app_container, &mut job_manager).await; start_the_http_api(config, app_container, &mut job_manager).await; @@ -126,6 +128,18 @@ fn load_torrents_from_database(config: &Configuration, app_container: &Arc, + job_manager: &mut JobManager, +) { + let opt_handle = jobs::torrent_repository::start_event_listener(config, app_container); + + if 
let Some(handle) = opt_handle { + job_manager.push("torrent_repository_event_listener", handle); + } +} + fn start_http_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { let opt_handle = jobs::http_tracker_core::start_event_listener(config, app_container); diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index 2e3d798ad..b311c6da6 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -11,6 +11,7 @@ pub mod http_tracker; pub mod http_tracker_core; pub mod manager; pub mod torrent_cleanup; +pub mod torrent_repository; pub mod tracker_apis; pub mod udp_tracker; pub mod udp_tracker_core; diff --git a/src/bootstrap/jobs/torrent_repository.rs b/src/bootstrap/jobs/torrent_repository.rs new file mode 100644 index 000000000..2125de554 --- /dev/null +++ b/src/bootstrap/jobs/torrent_repository.rs @@ -0,0 +1,20 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; + +pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { + if config.core.tracker_usage_statistics { + let job = torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( + app_container.torrent_repository_container.event_bus.receiver(), + &app_container.torrent_repository_container.stats_repository, + ); + + Some(job) + } else { + tracing::info!("HTTP tracker core event listener job is disabled."); + None + } +} diff --git a/src/container.rs b/src/container.rs index 93f1fb4d7..016b4a881 100644 --- a/src/container.rs +++ b/src/container.rs @@ -9,6 +9,7 @@ use bittorrent_udp_tracker_core::{self}; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{Configuration, HttpApi}; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use 
torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; @@ -28,6 +29,9 @@ pub struct AppContainer { // Registar pub registar: Arc, + // Torrent Repository + pub torrent_repository_container: Arc, + // Core pub tracker_core_container: Arc, @@ -54,6 +58,10 @@ impl AppContainer { let registar = Arc::new(Registar::default()); + // Torrent Repository + + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + // Core let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); @@ -84,6 +92,9 @@ impl AppContainer { // Registar registar, + // Torrent Repository + torrent_repository_container, + // Core tracker_core_container, From 95766bb9897a8ecec544bf6d38bcfd532c1d0ea9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 9 May 2025 17:44:26 +0100 Subject: [PATCH 039/247] feat: [#1358] inject Swarms into InMemoryTorrentRepository in production code todo: do the same for testing code. --- packages/torrent-repository/src/swarms.rs | 4 +++- packages/tracker-core/src/container.rs | 15 ++++++++++++++- .../src/torrent/repository/in_memory.rs | 5 +++++ src/container.rs | 5 ++++- 4 files changed, 26 insertions(+), 3 deletions(-) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index a140663c9..9dddaa0c0 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -9,7 +9,7 @@ use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMe use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use crate::swarm::Swarm; -use crate::SwarmHandle; +use crate::{SwarmHandle, TORRENT_REPOSITORY_LOG_TARGET}; #[derive(Default, Debug)] pub struct Swarms { @@ -43,6 +43,8 @@ impl Swarms { peer: &peer::Peer, opt_persistent_torrent: Option, ) -> Result { + tracing::trace!(target: TORRENT_REPOSITORY_LOG_TARGET, "Handling announcement for 
torrent: {info_hash}"); + let swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { SwarmHandle::new(Swarm::new(number_of_downloads).into()) } else { diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 9f4d23802..3f35c3943 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -1,6 +1,8 @@ use std::sync::Arc; use torrust_tracker_configuration::Core; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_torrent_repository::Swarms; use crate::announce_handler::AnnounceHandler; use crate::authentication::handler::KeysHandler; @@ -35,8 +37,19 @@ pub struct TrackerCoreContainer { } impl TrackerCoreContainer { + #[must_use] + pub fn initialize_from(core_config: &Arc, torrent_repository_container: &Arc) -> Self { + Self::inner_initialize(core_config, &torrent_repository_container.swarms) + } + #[must_use] pub fn initialize(core_config: &Arc) -> Self { + let swarms = Arc::new(Swarms::default()); + Self::inner_initialize(core_config, &swarms) + } + + #[must_use] + fn inner_initialize(core_config: &Arc, swarms: &Arc) -> Self { let database = initialize_database(core_config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); @@ -48,7 +61,7 @@ impl TrackerCoreContainer { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms.clone())); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 
ffb53edad..c8e593471 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -25,6 +25,11 @@ pub struct InMemoryTorrentRepository { } impl InMemoryTorrentRepository { + #[must_use] + pub fn new(swarms: Arc) -> Self { + Self { swarms } + } + /// Inserts or updates a peer in the torrent entry corresponding to the /// given infohash. /// diff --git a/src/container.rs b/src/container.rs index 016b4a881..838de58d6 100644 --- a/src/container.rs +++ b/src/container.rs @@ -64,7 +64,10 @@ impl AppContainer { // Core - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); // HTTP From 41f402292a8b4661cf3d9ca0032d9f506ba0ea43 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 May 2025 11:10:13 +0100 Subject: [PATCH 040/247] feat: [#1358] inject Swarms into InMemoryTorrentRepository in testing code --- Cargo.lock | 6 ++++++ packages/axum-http-tracker-server/Cargo.toml | 1 + .../axum-http-tracker-server/src/environment.rs | 9 ++++++++- packages/axum-http-tracker-server/src/server.rs | 8 +++++++- packages/axum-rest-tracker-api-server/Cargo.toml | 1 + .../src/environment.rs | 11 ++++++++++- packages/http-tracker-core/Cargo.toml | 1 + packages/http-tracker-core/src/container.rs | 10 +++++++++- packages/rest-tracker-api-core/Cargo.toml | 1 + packages/rest-tracker-api-core/src/container.rs | 11 ++++++++++- packages/tracker-core/src/container.rs | 14 +------------- packages/udp-tracker-core/Cargo.toml | 1 + packages/udp-tracker-core/src/container.rs | 10 +++++++++- packages/udp-tracker-server/Cargo.toml | 1 + packages/udp-tracker-server/src/environment.rs | 10 +++++++++- 15 files changed, 75 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5f024dcc2..b39355065 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ 
-593,6 +593,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "tracing", ] @@ -708,6 +709,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "tracing", "zerocopy 0.7.35", ] @@ -4575,6 +4577,7 @@ dependencies = [ "torrust-tracker-events", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "tower", "tower-http", "tracing", @@ -4614,6 +4617,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", "tower", "tower-http", @@ -4666,6 +4670,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", ] @@ -4913,6 +4918,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "tracing", "url", "uuid", diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index 1b4627d41..81831a614 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -33,6 +33,7 @@ torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", 
"propagate-header", "request-id", "trace"] } tracing = "0" diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index aeb53a710..b9ac6bdbb 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -10,6 +10,7 @@ use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::server::{HttpServer, Launcher, Running, Stopped}; @@ -143,7 +144,13 @@ impl EnvContainer { .expect("missing HTTP tracker configuration"); let http_tracker_config = Arc::new(http_tracker_config[0].clone()); - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); + let http_tracker_container = HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &http_tracker_config); diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index ff1650b9c..3904449fa 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -260,6 +260,7 @@ mod tests { use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_test_helpers::configuration::ephemeral_public; + use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::server::{HttpServer, Launcher}; @@ -289,7 +290,12 @@ mod tests { let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } - let 
tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); let announce_service = Arc::new(AnnounceService::new( tracker_core_container.core_config.clone(), diff --git a/packages/axum-rest-tracker-api-server/Cargo.toml b/packages/axum-rest-tracker-api-server/Cargo.toml index d1491c96e..296f77d61 100644 --- a/packages/axum-rest-tracker-api-server/Cargo.toml +++ b/packages/axum-rest-tracker-api-server/Cargo.toml @@ -39,6 +39,7 @@ torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index 275d72574..0758b38d1 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -12,6 +12,7 @@ use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use 
torrust_udp_tracker_server::container::UdpTrackerServerContainer; use crate::server::{ApiServer, Launcher, Running, Stopped}; @@ -172,11 +173,19 @@ impl EnvContainer { .clone(), ); - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); + let http_tracker_core_container = HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &http_tracker_config); + let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); let tracker_http_api_core_container = TrackerHttpApiCoreContainer::initialize_from( diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index 5473c5a25..37b540e39 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -28,6 +28,7 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configur torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } tracing = "0" [dev-dependencies] diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 681d4a4f4..922273610 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use 
torrust_tracker_configuration::{Core, HttpTracker}; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; @@ -26,7 +27,13 @@ pub struct HttpTrackerCoreContainer { impl HttpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, http_tracker_config: &Arc) -> Arc { - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + core_config, + &torrent_repository_container, + )); + Self::initialize_from_tracker_core(&tracker_core_container, http_tracker_config) } @@ -36,6 +43,7 @@ impl HttpTrackerCoreContainer { http_tracker_config: &Arc, ) -> Arc { let http_tracker_core_services = HttpTrackerCoreServices::initialize_from(tracker_core_container); + Self::initialize_from_services(tracker_core_container, &http_tracker_core_services, http_tracker_config) } diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index 0077572fb..de1946239 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -21,6 +21,7 @@ tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } [dev-dependencies] diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index ec3786dfb..327ab4bd6 
100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -7,6 +7,7 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; pub struct TrackerHttpApiCoreContainer { @@ -26,11 +27,19 @@ impl TrackerHttpApiCoreContainer { udp_tracker_config: &Arc, http_api_config: &Arc, ) -> Arc { - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + core_config, + &torrent_repository_container, + )); + let http_tracker_core_container = HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, http_tracker_config); + let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(core_config); Self::initialize_from( diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 3f35c3943..f4fb272de 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -2,7 +2,6 @@ use std::sync::Arc; use torrust_tracker_configuration::Core; use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; -use torrust_tracker_torrent_repository::Swarms; use crate::announce_handler::AnnounceHandler; use crate::authentication::handler::KeysHandler; @@ -39,17 +38,6 @@ pub struct TrackerCoreContainer { impl TrackerCoreContainer { #[must_use] pub fn initialize_from(core_config: &Arc, 
torrent_repository_container: &Arc) -> Self { - Self::inner_initialize(core_config, &torrent_repository_container.swarms) - } - - #[must_use] - pub fn initialize(core_config: &Arc) -> Self { - let swarms = Arc::new(Swarms::default()); - Self::inner_initialize(core_config, &swarms) - } - - #[must_use] - fn inner_initialize(core_config: &Arc, swarms: &Arc) -> Self { let database = initialize_database(core_config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); @@ -61,7 +49,7 @@ impl TrackerCoreContainer { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms.clone())); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(torrent_repository_container.swarms.clone())); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index 6cf250074..9a27ec826 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -33,6 +33,7 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configur torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } tracing = "0" zerocopy = "0.7" diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 98c01a703..2b6567ec0 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -3,6 +3,7 @@ use 
std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, UdpTracker}; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; @@ -31,7 +32,13 @@ pub struct UdpTrackerCoreContainer { impl UdpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, udp_tracker_config: &Arc) -> Arc { - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + core_config, + &torrent_repository_container, + )); + Self::initialize_from_tracker_core(&tracker_core_container, udp_tracker_config) } @@ -41,6 +48,7 @@ impl UdpTrackerCoreContainer { udp_tracker_config: &Arc, ) -> Arc { let udp_tracker_core_services = UdpTrackerCoreServices::initialize_from(tracker_core_container); + Self::initialize_from_services(tracker_core_container, &udp_tracker_core_services, udp_tracker_config) } diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index 4d0296461..a0c129acb 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -33,6 +33,7 @@ torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } tracing = "0" url = { version = "2", features = ["serde"] } uuid = { version = "1", features = ["v4"] } diff --git a/packages/udp-tracker-server/src/environment.rs 
b/packages/udp-tracker-server/src/environment.rs index 962442fde..e3667e74a 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -8,6 +8,7 @@ use tokio::task::JoinHandle; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_primitives::peer; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::container::UdpTrackerServerContainer; use crate::server::spawner::Spawner; @@ -173,9 +174,16 @@ impl EnvContainer { let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); + let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); Self { From 68b930d4b4e89c3003fb38689f6c2b3c32bb06d2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 May 2025 11:30:29 +0100 Subject: [PATCH 041/247] feat: [#1495] expose new torrent-repositoru metrics via the REST API These are the new metrics in JSON format: http://localhost:1212/api/v1/metrics?token=MyAccessToken ```json { "metrics": [ { "kind": "counter", "name": "torrent_repository_persistent_torrents_downloads_total", "samples": [] }, { "kind": "counter", "name": "torrent_repository_runtime_torrents_downloads_total", "samples": [] } ] } ``` --- .../src/environment.rs | 1 + .../src/v1/context/stats/handlers.rs | 2 ++ 
.../src/v1/context/stats/routes.rs | 1 + .../rest-tracker-api-core/src/container.rs | 22 ++++++++++++++++--- .../src/statistics/services.rs | 6 +++++ src/container.rs | 9 ++++++-- 6 files changed, 36 insertions(+), 5 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index 0758b38d1..ae3eadb31 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -189,6 +189,7 @@ impl EnvContainer { let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); let tracker_http_api_core_container = TrackerHttpApiCoreContainer::initialize_from( + &torrent_repository_container, &tracker_core_container, &http_tracker_core_container, &udp_tracker_core_container, diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 17d3e4f2d..552958d74 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -69,6 +69,7 @@ pub async fn get_metrics_handler( State(state): State<( Arc, Arc>, + Arc, Arc, Arc, Arc, @@ -81,6 +82,7 @@ pub async fn get_metrics_handler( state.2.clone(), state.3.clone(), state.4.clone(), + state.5.clone(), ) .await; diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index c19f08b2a..3eeaa8bf4 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -28,6 +28,7 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc, + + // Torrent repository + pub torrent_repository_container: Arc, + + // Tracker core pub tracker_core_container: Arc, + + // HTTP tracker core 
pub http_stats_repository: Arc, + + // UDP tracker core pub ban_service: Arc>, pub udp_core_stats_repository: Arc, pub udp_server_stats_repository: Arc, @@ -43,6 +52,7 @@ impl TrackerHttpApiCoreContainer { let udp_tracker_server_container = UdpTrackerServerContainer::initialize(core_config); Self::initialize_from( + &torrent_repository_container, &tracker_core_container, &http_tracker_core_container, &udp_tracker_core_container, @@ -53,6 +63,7 @@ impl TrackerHttpApiCoreContainer { #[must_use] pub fn initialize_from( + torrent_repository_container: &Arc, tracker_core_container: &Arc, http_tracker_core_container: &Arc, udp_tracker_core_container: &Arc, @@ -60,16 +71,21 @@ impl TrackerHttpApiCoreContainer { http_api_config: &Arc, ) -> Arc { Arc::new(TrackerHttpApiCoreContainer { + http_api_config: http_api_config.clone(), + + // Torrent repository + torrent_repository_container: torrent_repository_container.clone(), + + // Tracker core tracker_core_container: tracker_core_container.clone(), + // HTTP tracker core http_stats_repository: http_tracker_core_container.stats_repository.clone(), + // UDP tracker core ban_service: udp_tracker_core_container.ban_service.clone(), udp_core_stats_repository: udp_tracker_core_container.stats_repository.clone(), - udp_server_stats_repository: udp_tracker_server_container.stats_repository.clone(), - - http_api_config: http_api_config.clone(), }) } } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 8d5b7514a..b8c2f3f1d 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -93,6 +93,7 @@ pub struct TrackerLabeledMetrics { pub async fn get_labeled_metrics( in_memory_torrent_repository: Arc, ban_service: Arc>, + swarms_stats_repository: Arc, http_stats_repository: Arc, udp_stats_repository: Arc, udp_server_stats_repository: Arc, @@ -100,12 +101,17 @@ pub async fn 
get_labeled_metrics( let _torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); let _udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); + let swarms_stats = swarms_stats_repository.get_metrics().await; let http_stats = http_stats_repository.get_stats().await; let udp_stats_repository = udp_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; // Merge all the metrics into a single collection let mut metrics = MetricCollection::default(); + + metrics + .merge(&swarms_stats.metric_collection) + .expect("msg: failed to merge torrent repository metrics"); metrics .merge(&http_stats.metric_collection) .expect("msg: failed to merge HTTP core metrics"); diff --git a/src/container.rs b/src/container.rs index 838de58d6..273425fc1 100644 --- a/src/container.rs +++ b/src/container.rs @@ -142,10 +142,15 @@ impl AppContainer { #[must_use] pub fn tracker_http_api_container(&self, http_api_config: &Arc) -> Arc { TrackerHttpApiCoreContainer { - tracker_core_container: self.tracker_core_container.clone(), http_api_config: http_api_config.clone(), - ban_service: self.udp_tracker_core_services.ban_service.clone(), + + torrent_repository_container: self.torrent_repository_container.clone(), + + tracker_core_container: self.tracker_core_container.clone(), + http_stats_repository: self.http_tracker_core_services.stats_repository.clone(), + + ban_service: self.udp_tracker_core_services.ban_service.clone(), udp_core_stats_repository: self.udp_tracker_core_services.stats_repository.clone(), udp_server_stats_repository: self.udp_tracker_server_container.stats_repository.clone(), } From 2c479a1baa112a4bd86eeb686bc018c3e4f08716 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 May 2025 16:38:25 +0100 Subject: [PATCH 042/247] refactor: [#1358] inject event sender in Swarms type --- .../src/environment.rs | 8 +- .../tests/server/v1/contract.rs | 31 ++-- .../src/environment.rs | 8 +- 
.../tests/server/v1/contract/context/stats.rs | 3 +- .../server/v1/contract/context/torrent.rs | 18 +-- packages/events/src/sender.rs | 1 + packages/torrent-repository/src/container.rs | 4 +- .../src/statistics/event/handler.rs | 26 +++- packages/torrent-repository/src/swarms.rs | 140 ++++++++++++------ .../torrent-repository/tests/swarms/mod.rs | 8 +- packages/tracker-core/src/announce_handler.rs | 14 +- packages/tracker-core/src/torrent/manager.rs | 30 ++-- .../src/torrent/repository/in_memory.rs | 9 +- packages/tracker-core/src/torrent/services.rs | 36 +++-- .../udp-tracker-server/src/environment.rs | 5 +- .../src/handlers/announce.rs | 18 ++- .../udp-tracker-server/src/handlers/scrape.rs | 4 +- 17 files changed, 231 insertions(+), 132 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index b9ac6bdbb..078bda9e5 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -25,12 +25,12 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker - pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _number_of_downloads_increased = self - .container + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + self.container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None); + .upsert_peer(info_hash, peer, None) + .await } } diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index d1f52d55a..afd4d3168 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -474,7 +474,7 @@ mod for_all_config_modes { let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 
1 - env.add_torrent_peer(&info_hash, &previously_announced_peer); + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2. This new peer is non included on the response peer list let response = Client::new(*env.bind_address()) @@ -517,7 +517,7 @@ mod for_all_config_modes { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) .build(); - env.add_torrent_peer(&info_hash, &peer_using_ipv4); + env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; // Announce a peer using IPV6 let peer_using_ipv6 = PeerBuilder::default() @@ -527,7 +527,7 @@ mod for_all_config_modes { 8080, )) .build(); - env.add_torrent_peer(&info_hash, &peer_using_ipv6); + env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; // Announce the new Peer. let response = Client::new(*env.bind_address()) @@ -625,7 +625,7 @@ mod for_all_config_modes { let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 1 - env.add_torrent_peer(&info_hash, &previously_announced_peer); + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 accepting compact responses let response = Client::new(*env.bind_address()) @@ -666,7 +666,7 @@ mod for_all_config_modes { let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 1 - env.add_torrent_peer(&info_hash, &previously_announced_peer); + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list @@ -1010,7 +1010,8 @@ mod for_all_config_modes { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1050,7 +1051,8 @@ mod 
for_all_config_modes { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_no_bytes_pending_to_download() .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1282,7 +1284,8 @@ mod configured_as_whitelisted { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1318,7 +1321,8 @@ mod configured_as_whitelisted { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; env.container .tracker_core_container @@ -1494,7 +1498,8 @@ mod configured_as_private { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1525,7 +1530,8 @@ mod configured_as_private { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let expiring_key = env .container @@ -1576,7 +1582,8 @@ mod configured_as_private { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index ae3eadb31..e4a83d15d 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -33,12 +33,12 @@ where S: std::fmt::Debug + std::fmt::Display, { /// Add a torrent to the tracker - pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _number_of_downloads_increased = self - .container + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + self.container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, 
None); + .upsert_peer(info_hash, peer, None) + .await } } diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs index 51a4804e7..7cae0abbf 100644 --- a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs @@ -21,7 +21,8 @@ async fn should_allow_getting_tracker_statistics() { env.add_torrent_peer( &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), // DevSkim: ignore DS173237 &PeerBuilder::default().into(), - ); + ) + .await; let request_id = Uuid::new_v4(); diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs index 42421db99..ae9819785 100644 --- a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs @@ -26,7 +26,7 @@ async fn should_allow_getting_all_torrents() { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -59,8 +59,8 @@ async fn should_allow_limiting_the_torrents_in_the_result() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + 
env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -96,8 +96,8 @@ async fn should_allow_the_torrents_result_pagination() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -132,8 +132,8 @@ async fn should_allow_getting_a_list_of_torrents_providing_infohashes() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -307,7 +307,7 @@ async fn should_allow_getting_a_torrent_info() { let peer = PeerBuilder::default().into(); - env.add_torrent_peer(&info_hash, &peer); + env.add_torrent_peer(&info_hash, &peer).await; let request_id = Uuid::new_v4(); @@ -389,7 +389,7 @@ async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; let 
request_id = Uuid::new_v4(); diff --git a/packages/events/src/sender.rs b/packages/events/src/sender.rs index 9fc77f650..3dccade4c 100644 --- a/packages/events/src/sender.rs +++ b/packages/events/src/sender.rs @@ -1,4 +1,5 @@ use std::fmt; +use std::fmt::Debug; use futures::future::BoxFuture; #[cfg(test)] diff --git a/packages/torrent-repository/src/container.rs b/packages/torrent-repository/src/container.rs index 7522c7956..50a6b8b9c 100644 --- a/packages/torrent-repository/src/container.rs +++ b/packages/torrent-repository/src/container.rs @@ -16,8 +16,6 @@ pub struct TorrentRepositoryContainer { impl TorrentRepositoryContainer { #[must_use] pub fn initialize() -> Self { - let swarms = Arc::new(Swarms::default()); - // Torrent repository stats let broadcaster = Broadcaster::default(); let stats_repository = Arc::new(Repository::new()); @@ -27,6 +25,8 @@ impl TorrentRepositoryContainer { let stats_event_sender = event_bus.sender(); + let swarms = Arc::new(Swarms::new(stats_event_sender.clone())); + Self { swarms, event_bus, diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index d68df0b1b..2073575a8 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -9,13 +9,25 @@ use crate::statistics::repository::Repository; /// /// This function panics if the client IP address is not the same as the IP /// version of the event. -pub async fn handle_event(_event: Event, stats_repository: &Arc, _now: DurationSinceUnixEpoch) { - /*match event { - Event::TorrentAdded { .. } => {} - Event::TorrentRemoved { .. } => {} - Event::PeerAdded { .. } => {} - Event::PeerRemoved { .. } => {} - }*/ +pub async fn handle_event(event: Event, stats_repository: &Arc, _now: DurationSinceUnixEpoch) { + match event { + Event::TorrentAdded { info_hash, .. 
} => { + // todo: update metrics + tracing::debug!("Torrent added {info_hash}"); + } + Event::TorrentRemoved { info_hash } => { + // todo: update metrics + tracing::debug!("Torrent removed {info_hash}"); + } + Event::PeerAdded { announcement } => { + // todo: update metrics + tracing::debug!("Peer added {announcement:?}"); + } + Event::PeerRemoved { socket_addr, peer_id } => { + // todo: update metrics + tracing::debug!("Peer removed: socket address {socket_addr:?}, peer ID: {peer_id:?}"); + } + } tracing::debug!("metrics: {:?}", stats_repository.get_metrics().await); } diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 9dddaa0c0..d92e1755a 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -8,15 +8,26 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use crate::event::sender::Sender; +use crate::event::Event; use crate::swarm::Swarm; -use crate::{SwarmHandle, TORRENT_REPOSITORY_LOG_TARGET}; +use crate::SwarmHandle; -#[derive(Default, Debug)] +#[derive(Default)] pub struct Swarms { swarms: SkipMap, + event_sender: Sender, } impl Swarms { + #[must_use] + pub fn new(event_sender: Sender) -> Self { + Self { + swarms: SkipMap::new(), + event_sender, + } + } + /// Upsert a peer into the swarm of a torrent. /// /// Optionally, it can also preset the number of downloads of the torrent @@ -37,36 +48,55 @@ impl Swarms { /// # Errors /// /// This function panics if the lock for the swarm handle cannot be acquired. 
- pub fn handle_announcement( + pub async fn handle_announcement( &self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option, ) -> Result { - tracing::trace!(target: TORRENT_REPOSITORY_LOG_TARGET, "Handling announcement for torrent: {info_hash}"); + let swarm_handle = match self.swarms.get(info_hash) { + None => { + let new_swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { + SwarmHandle::new(Swarm::new(number_of_downloads).into()) + } else { + SwarmHandle::default() + }; - let swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { - SwarmHandle::new(Swarm::new(number_of_downloads).into()) - } else { - SwarmHandle::default() - }; + let new_swarm_handle = self.swarms.get_or_insert(*info_hash, new_swarm_handle); - let swarm_handle = self.swarms.get_or_insert(*info_hash, swarm_handle); + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::TorrentAdded { + info_hash: *info_hash, + announcement: *peer, + }) + .await; + } + + new_swarm_handle + } + Some(existing_swarm_handle) => existing_swarm_handle, + }; let mut swarm = swarm_handle.value().lock()?; Ok(swarm.handle_announcement(peer)) } - /// Inserts a new swarm. + /// Inserts a new swarm. Only used for testing purposes. pub fn insert(&self, info_hash: &InfoHash, swarm: Swarm) { - // code-review: swarms builder? + // code-review: swarms builder? or constructor from vec? // It's only used for testing purposes. It allows to pre-define the // initial state of the swarm without having to go through the upsert // process. let swarm_handle = Arc::new(Mutex::new(swarm)); + self.swarms.insert(*info_hash, swarm_handle); + + // IMPORTANT: Notice this does not send an event because is used only + // for testing purposes. The event is sent only when the torrent is + // announced for the first time. } /// Removes a torrent entry from the repository. 
@@ -75,8 +105,14 @@ impl Swarms { /// /// An `Option` containing the removed torrent entry if it existed. #[must_use] - pub fn remove(&self, key: &InfoHash) -> Option { - self.swarms.remove(key).map(|entry| entry.value().clone()) + pub async fn remove(&self, key: &InfoHash) -> Option { + let swarm_handle = self.swarms.remove(key).map(|entry| entry.value().clone()); + + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender.send(Event::TorrentRemoved { info_hash: *key }).await; + } + + swarm_handle } /// Retrieves a tracked torrent handle by its infohash. @@ -402,7 +438,7 @@ impl From>> for Error { #[cfg(test)] mod tests { - mod the_in_memory_torrent_repository { + mod the_swarm_repository { use aquatic_udp_protocol::PeerId; @@ -447,7 +483,7 @@ mod tests { let info_hash = sample_info_hash(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); + swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); assert!(swarms.get(&info_hash).is_some()); } @@ -458,8 +494,8 @@ mod tests { let info_hash = sample_info_hash(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); + swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); + swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); assert!(swarms.get(&info_hash).is_some()); } @@ -474,7 +510,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::tests::the_in_memory_torrent_repository::numeric_peer_id; + use crate::swarms::tests::the_swarm_repository::numeric_peer_id; use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; @@ -485,7 +521,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let 
_number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); let peers = swarms.get_swarm_peers(&info_hash, 74).unwrap(); @@ -518,7 +554,7 @@ mod tests { event: AnnounceEvent::Completed, }; - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); } let peers = swarms.get_swarm_peers(&info_hash, 74).unwrap(); @@ -536,7 +572,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::tests::the_in_memory_torrent_repository::numeric_peer_id; + use crate::swarms::tests::the_swarm_repository::numeric_peer_id; use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; @@ -558,7 +594,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); let peers = swarms .get_peers_peers_excluding(&info_hash, &peer, TORRENT_PEERS_LIMIT) @@ -575,7 +611,7 @@ mod tests { let excluded_peer = sample_peer(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &excluded_peer, None); + swarms.handle_announcement(&info_hash, &excluded_peer, None).await.unwrap(); // Add 74 peers for idx in 2..=75 { @@ -589,7 +625,7 @@ mod tests { event: AnnounceEvent::Completed, }; - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); } let peers = swarms @@ -619,9 +655,9 @@ mod tests { let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); + swarms.handle_announcement(&info_hash, 
&sample_peer(), None).await.unwrap(); - let _unused = swarms.remove(&info_hash); + let _unused = swarms.remove(&info_hash).await; assert!(swarms.get(&info_hash).is_none()); } @@ -634,7 +670,7 @@ mod tests { let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); // Cut off time is 1 second after the peer was updated swarms @@ -644,13 +680,13 @@ mod tests { assert!(!swarms.get_swarm_peers(&info_hash, 74).unwrap().contains(&Arc::new(peer))); } - fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { + async fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { let swarms = Arc::new(Swarms::default()); // Insert a sample peer for the torrent to force adding the torrent entry let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = swarms.handle_announcement(info_hash, &peer, None); + swarms.handle_announcement(info_hash, &peer, None).await.unwrap(); // Remove the peer swarms @@ -664,7 +700,7 @@ mod tests { async fn it_should_remove_torrents_without_peers() { let info_hash = sample_info_hash(); - let swarms = initialize_repository_with_one_torrent_without_peers(&info_hash); + let swarms = initialize_repository_with_one_torrent_without_peers(&info_hash).await; let tracker_policy = TrackerPolicy { remove_peerless_torrents: true, @@ -721,7 +757,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); let torrent_entry = swarms.get(&info_hash).unwrap(); @@ -744,7 +780,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use 
crate::swarms::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::TorrentEntryInfo; use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; @@ -754,7 +790,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); let torrent_entries = swarms.get_paginated(None); @@ -782,7 +818,7 @@ mod tests { use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::TorrentEntryInfo; use crate::swarms::Swarms; use crate::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, @@ -796,12 +832,12 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); + swarms.handle_announcement(&info_hash_one, &peer_one, None).await.unwrap(); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); + swarms.handle_announcement(&info_hash_one, &peer_two, None).await.unwrap(); // Get only the first page where page size is 1 let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 0, limit: 1 })); @@ -831,12 +867,12 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - 
let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); + swarms.handle_announcement(&info_hash_one, &peer_one, None).await.unwrap(); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); + swarms.handle_announcement(&info_hash_one, &peer_two, None).await.unwrap(); // Get only the first page where page size is 1 let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); @@ -866,12 +902,12 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); + swarms.handle_announcement(&info_hash_one, &peer_one, None).await.unwrap(); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); + swarms.handle_announcement(&info_hash_one, &peer_two, None).await.unwrap(); // Get only the first page where page size is 1 let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); @@ -915,7 +951,10 @@ mod tests { async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() { let swarms = Arc::new(Swarms::default()); - let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &leecher(), None); + swarms + .handle_announcement(&sample_info_hash(), &leecher(), None) + .await + .unwrap(); let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); @@ -934,7 +973,10 @@ mod tests { async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() { let swarms = 
Arc::new(Swarms::default()); - let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &seeder(), None); + swarms + .handle_announcement(&sample_info_hash(), &seeder(), None) + .await + .unwrap(); let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); @@ -953,7 +995,10 @@ mod tests { async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() { let swarms = Arc::new(Swarms::default()); - let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &complete_peer(), None); + swarms + .handle_announcement(&sample_info_hash(), &complete_peer(), None) + .await + .unwrap(); let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); @@ -974,7 +1019,10 @@ mod tests { let start_time = std::time::Instant::now(); for i in 0..1_000_000 { - let _number_of_downloads_increased = swarms.handle_announcement(&gen_seeded_infohash(&i), &leecher(), None); + swarms + .handle_announcement(&gen_seeded_infohash(&i), &leecher(), None) + .await + .unwrap(); } let result_a = start_time.elapsed(); @@ -1010,7 +1058,7 @@ mod tests { let infohash = sample_info_hash(); - let _number_of_downloads_increased = swarms.handle_announcement(&infohash, &leecher(), None); + swarms.handle_announcement(&infohash, &leecher(), None).await.unwrap(); let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).unwrap(); diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs index 8e58b9e76..975457cca 100644 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -377,12 +377,12 @@ async fn it_should_remove_an_entry(#[values(swarms())] swarms: Swarms, #[case] e Some(torrent.clone()) ); assert_eq!( - Some(swarms.remove(&info_hash).unwrap().lock_or_panic().clone()), + Some(swarms.remove(&info_hash).await.unwrap().lock_or_panic().clone()), Some(torrent) ); 
assert!(swarms.get(&info_hash).is_none()); - assert!(swarms.remove(&info_hash).is_none()); + assert!(swarms.remove(&info_hash).await.is_none()); } assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, 0); @@ -435,7 +435,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Insert the infohash and peer into the repository // and verify there is an extra torrent entry. { - swarms.handle_announcement(&info_hash, &peer, None).unwrap(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); assert_eq!( swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, entries.len() as u64 + 1 @@ -445,7 +445,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Insert the infohash and peer into the repository // and verify the swarm metadata was updated. { - swarms.handle_announcement(&info_hash, &peer, None).unwrap(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); let stats = swarms.get_swarm_metadata(&info_hash).unwrap(); assert_eq!( stats, diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index fac0a38c8..00d42174a 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -171,9 +171,10 @@ impl AnnounceHandler { peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); - let number_of_downloads_increased = - self.in_memory_torrent_repository - .upsert_peer(info_hash, peer, opt_persistent_torrent); + let number_of_downloads_increased = self + .in_memory_torrent_repository + .upsert_peer(info_hash, peer, opt_persistent_torrent) + .await; if self.config.tracker_policy.persistent_torrent_completed_stat && number_of_downloads_increased { self.db_torrent_repository.increase_number_of_downloads(info_hash)?; @@ -594,7 +595,7 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use 
torrust_tracker_test_helpers::configuration; - use torrust_tracker_torrent_repository::LockTrackedTorrent; + use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; use crate::announce_handler::tests::the_announce_handler::peer_ip; use crate::announce_handler::{AnnounceHandler, PeersWanted}; @@ -613,7 +614,8 @@ mod tests { config.core.tracker_policy.persistent_torrent_completed_stat = true; let database = initialize_database(&config.core); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let swarms = Arc::new(Swarms::default()); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms)); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( &config.core, @@ -648,7 +650,7 @@ mod tests { assert_eq!(announce_data.stats.downloaded, 1); // Remove the newly updated torrent from memory - let _unused = in_memory_torrent_repository.remove(&info_hash); + let _unused = in_memory_torrent_repository.remove(&info_hash).await; torrents_manager.load_torrents_from_database().unwrap(); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index aaac811f2..dec52daac 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -144,7 +144,7 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Core; - use torrust_tracker_torrent_repository::LockTrackedTorrent; + use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; use super::{DatabasePersistentTorrentRepository, TorrentsManager}; use crate::databases::setup::initialize_database; @@ -163,7 +163,8 @@ mod tests { } fn initialize_torrents_manager_with(config: Core) -> (Arc, Arc) { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let swarms = Arc::new(Swarms::default()); + let 
in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms)); let database = initialize_database(&config); let database_persistent_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); @@ -219,8 +220,8 @@ mod tests { use crate::torrent::manager::tests::{initialize_torrents_manager, initialize_torrents_manager_with}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - #[test] - fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { + #[tokio::test] + async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { let (torrents_manager, services) = initialize_torrents_manager(); let infohash = sample_info_hash(); @@ -230,7 +231,10 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = services.in_memory_torrent_repository.upsert_peer(&infohash, &peer, None); + let _number_of_downloads_increased = services + .in_memory_torrent_repository + .upsert_peer(&infohash, &peer, None) + .await; // Simulate the time has passed 1 second more than the max peer timeout. clock::Stopped::local_add(&Duration::from_secs( @@ -243,18 +247,18 @@ mod tests { assert!(services.in_memory_torrent_repository.get(&infohash).is_none()); } - fn add_a_peerless_torrent(infohash: &InfoHash, in_memory_torrent_repository: &Arc) { + async fn add_a_peerless_torrent(infohash: &InfoHash, in_memory_torrent_repository: &Arc) { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(infohash, &peer, None); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(infohash, &peer, None).await; // Remove the peer. The torrent is now peerless. 
in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); } - #[test] - fn it_should_remove_torrents_that_have_no_peers_when_it_is_configured_to_do_so() { + #[tokio::test] + async fn it_should_remove_torrents_that_have_no_peers_when_it_is_configured_to_do_so() { let mut config = ephemeral_configuration(); config.tracker_policy.remove_peerless_torrents = true; @@ -262,15 +266,15 @@ mod tests { let infohash = sample_info_hash(); - add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository); + add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository).await; torrents_manager.cleanup_torrents(); assert!(services.in_memory_torrent_repository.get(&infohash).is_none()); } - #[test] - fn it_should_retain_peerless_torrents_when_it_is_configured_to_do_so() { + #[tokio::test] + async fn it_should_retain_peerless_torrents_when_it_is_configured_to_do_so() { let mut config = ephemeral_configuration(); config.tracker_policy.remove_peerless_torrents = false; @@ -278,7 +282,7 @@ mod tests { let infohash = sample_info_hash(); - add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository); + add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository).await; torrents_manager.cleanup_torrents(); diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index c8e593471..37d9d3f5c 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -18,7 +18,7 @@ use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; /// /// Multiple implementations were considered, and the chosen implementation is /// used in production. Other implementations are kept for reference. -#[derive(Debug, Default)] +#[derive(Default)] pub struct InMemoryTorrentRepository { /// The underlying in-memory data structure that stores swarms data. 
swarms: Arc, @@ -49,7 +49,7 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. #[must_use] - pub fn upsert_peer( + pub async fn upsert_peer( &self, info_hash: &InfoHash, peer: &peer::Peer, @@ -57,6 +57,7 @@ impl InMemoryTorrentRepository { ) -> bool { self.swarms .handle_announcement(info_hash, peer, opt_persistent_torrent) + .await .expect("Failed to upsert the peer in swarms") } @@ -75,8 +76,8 @@ impl InMemoryTorrentRepository { /// An `Option` containing the removed torrent entry if it existed. #[cfg(test)] #[must_use] - pub(crate) fn remove(&self, key: &InfoHash) -> Option { - self.swarms.remove(key) + pub(crate) async fn remove(&self, key: &InfoHash) -> Option { + self.swarms.remove(key).await } /// Removes inactive peers from all torrent entries. diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index a35fd7aed..14a4f58f5 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -246,7 +246,9 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash, &sample_peer(), None) + .await; let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).unwrap(); @@ -290,7 +292,9 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash, &sample_peer(), None) + .await; let 
torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); @@ -315,8 +319,12 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash1, &sample_peer(), None) + .await; + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash2, &sample_peer(), None) + .await; let offset = 0; let limit = 1; @@ -336,8 +344,12 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash1, &sample_peer(), None) + .await; + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash2, &sample_peer(), None) + .await; let offset = 1; let limit = 4000; @@ -362,11 +374,15 @@ mod tests { let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash1, &sample_peer(), None) + .await; let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // 
DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash2, &sample_peer(), None) + .await; let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); @@ -414,7 +430,9 @@ mod tests { let info_hash = sample_info_hash(); - let _ = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _ = in_memory_torrent_repository + .upsert_peer(&info_hash, &sample_peer(), None) + .await; let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]); diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index e3667e74a..6dae3d860 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -34,12 +34,13 @@ where { /// Add a torrent to the tracker #[allow(dead_code)] - pub fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { let _number_of_downloads_increased = self .container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None); + .upsert_peer(info_hash, peer, None) + .await; } } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 5311531aa..ba0721289 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -353,7 +353,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } - fn add_a_torrent_peer_using_ipv6(in_memory_torrent_repository: &Arc) { + async fn add_a_torrent_peer_using_ipv6(in_memory_torrent_repository: &Arc) { let info_hash = 
AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -366,8 +366,9 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6, None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash.0.into(), &peer_using_ipv6, None) + .await; } async fn announce_a_new_peer_using_ipv4( @@ -405,7 +406,7 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository); + add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository).await; let response = announce_a_new_peer_using_ipv4(Arc::new(core_tracker_services), Arc::new(core_udp_tracker_services)).await; @@ -689,7 +690,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); } - fn add_a_torrent_peer_using_ipv4(in_memory_torrent_repository: &Arc) { + async fn add_a_torrent_peer_using_ipv4(in_memory_torrent_repository: &Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -701,8 +702,9 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4, None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash.0.into(), &peer_using_ipv4, None) + .await; } async fn announce_a_new_peer_using_ipv6( @@ -755,7 +757,7 @@ mod tests { let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - 
add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository); + add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository).await; let response = announce_a_new_peer_using_ipv6( core_tracker_services.core_config.clone(), diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 5cc84acd6..34d5a5ce2 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -163,7 +163,9 @@ mod tests { .with_number_of_bytes_left(0) .into(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer, None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash.0.into(), &peer, None) + .await; } fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { From 6d95d1ad22f87c46310a14f47736e52bac07d993 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 May 2025 19:12:58 +0100 Subject: [PATCH 043/247] refactor: [#1358] inject event sender in Swarm type It required to use `tokio::sync::Mutex` for the `SwarmHandle` (`Arc>`). Otherwise it's not safe to pass the Swarm lock between threads. 
--- Cargo.lock | 1 + .../tests/server/v1/contract.rs | 12 +- .../src/v1/context/torrent/handlers.rs | 17 +- .../src/statistics/services.rs | 2 +- .../src/statistics/services.rs | 2 +- packages/torrent-repository/Cargo.toml | 1 + packages/torrent-repository/src/lib.rs | 13 +- packages/torrent-repository/src/swarm.rs | 295 +++++++++++------- packages/torrent-repository/src/swarms.rs | 140 +++++---- .../torrent-repository/tests/swarm/mod.rs | 60 ++-- .../torrent-repository/tests/swarms/mod.rs | 198 ++++++------ packages/tracker-core/src/announce_handler.rs | 18 +- packages/tracker-core/src/scrape_handler.rs | 6 +- packages/tracker-core/src/torrent/manager.rs | 48 +-- .../src/torrent/repository/in_memory.rs | 25 +- packages/tracker-core/src/torrent/services.rs | 51 +-- .../src/statistics/services.rs | 2 +- .../src/handlers/announce.rs | 17 +- .../src/statistics/services.rs | 2 +- src/bootstrap/jobs/torrent_cleanup.rs | 2 +- 20 files changed, 510 insertions(+), 402 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b39355065..ddf163cc6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4857,6 +4857,7 @@ dependencies = [ "bittorrent-primitives", "criterion", "crossbeam-skiplist", + "futures", "rand 0.9.1", "rstest", "serde", diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index afd4d3168..d864ba67c 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -787,7 +787,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); @@ -829,7 +830,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; 
let peer_addr = peers[0].peer_addr; assert_eq!( @@ -878,7 +880,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; let peer_addr = peers[0].peer_addr; assert_eq!( @@ -925,7 +928,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/torrent/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/torrent/handlers.rs index 613abbdeb..eecbd9ac3 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/torrent/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/torrent/handlers.rs @@ -33,7 +33,7 @@ pub async fn get_torrent_handler( ) -> Response { match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), - Ok(info_hash) => match get_torrent_info(&in_memory_torrent_repository, &info_hash) { + Ok(info_hash) => match get_torrent_info(&in_memory_torrent_repository, &info_hash).await { Some(info) => torrent_info_response(info).into_response(), None => torrent_not_known_response(), }, @@ -85,14 +85,19 @@ pub async fn get_torrents_handler( tracing::debug!("pagination: {:?}", pagination); if pagination.0.info_hashes.is_empty() { - torrent_list_response(&get_torrents_page( - &in_memory_torrent_repository, - Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), - )) + torrent_list_response( + &get_torrents_page( + &in_memory_torrent_repository, + Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), + ) + .await, + ) .into_response() } else { match parse_info_hashes(pagination.0.info_hashes) { - Ok(info_hashes) => 
torrent_list_response(&get_torrents(&in_memory_torrent_repository, &info_hashes)).into_response(), + Ok(info_hashes) => { + torrent_list_response(&get_torrents(&in_memory_torrent_repository, &info_hashes).await).into_response() + } Err(err) => match err { QueryParamError::InvalidInfoHash { info_hash } => invalid_info_hash_param_response(&info_hash), }, diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 1c5890ea8..3c8a4fa43 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -47,7 +47,7 @@ pub async fn get_metrics( in_memory_torrent_repository: Arc, stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let stats = stats_repository.get_stats().await; TrackerMetrics { diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index b8c2f3f1d..aad31a323 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -32,7 +32,7 @@ pub async fn get_metrics( http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let http_stats = http_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 77192c7cf..1c7cc09fe 100644 --- a/packages/torrent-repository/Cargo.toml +++ 
b/packages/torrent-repository/Cargo.toml @@ -19,6 +19,7 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" crossbeam-skiplist = "0" +futures = "0" serde = "1.0.219" thiserror = "2.0.12" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index c6790c4db..3adf2f18d 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -4,8 +4,9 @@ pub mod statistics; pub mod swarm; pub mod swarms; -use std::sync::{Arc, Mutex, MutexGuard}; +use std::sync::Arc; +use tokio::sync::Mutex; use torrust_tracker_clock::clock; pub type Swarms = swarms::Swarms; @@ -24,16 +25,6 @@ pub(crate) type CurrentClock = clock::Stopped; pub const TORRENT_REPOSITORY_LOG_TARGET: &str = "TORRENT_REPOSITORY"; -pub trait LockTrackedTorrent { - fn lock_or_panic(&self) -> MutexGuard<'_, Swarm>; -} - -impl LockTrackedTorrent for SwarmHandle { - fn lock_or_panic(&self) -> MutexGuard<'_, Swarm> { - self.lock().expect("can't acquire lock for tracked torrent handle") - } -} - #[cfg(test)] pub(crate) mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 4437ca410..d1918bd24 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -1,6 +1,8 @@ //! A swarm is a collection of peers that are all trying to download the same //! torrent. 
use std::collections::BTreeMap; +use std::fmt::Debug; +use std::hash::{Hash, Hasher}; use std::net::SocketAddr; use std::sync::Arc; @@ -10,37 +12,72 @@ use torrust_tracker_primitives::peer::{self, Peer, PeerAnnouncement}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; -#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +use crate::event::sender::Sender; +use crate::event::Event; + +#[derive(Clone, Default)] pub struct Swarm { peers: BTreeMap>, metadata: SwarmMetadata, + event_sender: Sender, +} + +#[allow(clippy::missing_fields_in_debug)] +impl Debug for Swarm { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Swarm") + .field("peers", &self.peers) + .field("metadata", &self.metadata) + .finish() + } +} + +impl Hash for Swarm { + fn hash(&self, state: &mut H) { + self.peers.hash(state); + self.metadata.hash(state); + } +} + +impl PartialEq for Swarm { + fn eq(&self, other: &Self) -> bool { + self.peers == other.peers && self.metadata == other.metadata + } } +impl Eq for Swarm {} + impl Swarm { #[must_use] - pub fn new(downloaded: u32) -> Self { + pub fn new(downloaded: u32, event_sender: Sender) -> Self { Self { peers: BTreeMap::new(), metadata: SwarmMetadata::new(downloaded, 0, 0), + event_sender, } } - pub fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) -> bool { + pub async fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) -> bool { let mut downloads_increased: bool = false; let _previous_peer = match peer::ReadInfo::get_event(incoming_announce) { AnnounceEvent::Started | AnnounceEvent::None | AnnounceEvent::Completed => { - self.upsert_peer(Arc::new(*incoming_announce), &mut downloads_increased) + self.upsert_peer(Arc::new(*incoming_announce), &mut downloads_increased).await } - AnnounceEvent::Stopped => self.remove(incoming_announce), + AnnounceEvent::Stopped => 
self.remove(incoming_announce).await, }; downloads_increased } - pub fn upsert_peer(&mut self, incoming_announce: Arc, downloads_increased: &mut bool) -> Option> { + pub async fn upsert_peer( + &mut self, + incoming_announce: Arc, + downloads_increased: &mut bool, + ) -> Option> { let is_now_seeder = incoming_announce.is_seeder(); let has_completed = incoming_announce.event == AnnounceEvent::Completed; + let announcement = incoming_announce.clone(); if let Some(old_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { // A peer has been updated in the swarm. @@ -79,11 +116,19 @@ impl Swarm { // from a known peer } + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerAdded { + announcement: *announcement, + }) + .await; + } + None } } - pub fn remove(&mut self, peer_to_remove: &Peer) -> Option> { + pub async fn remove(&mut self, peer_to_remove: &Peer) -> Option> { match self.peers.remove(&peer_to_remove.peer_addr) { Some(old_peer) => { // A peer has been removed from the swarm. 
@@ -95,6 +140,15 @@ impl Swarm { self.metadata.incomplete -= 1; } + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerRemoved { + socket_addr: old_peer.peer_addr, + peer_id: old_peer.peer_id, + }) + .await; + } + Some(old_peer) } None => None, @@ -246,104 +300,107 @@ mod tests { assert_eq!(swarm.len(), 0); } - #[test] - fn it_should_allow_inserting_a_new_peer() { + #[tokio::test] + async fn it_should_allow_inserting_a_new_peer() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased), None); + assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased).await, None); } - #[test] - fn it_should_allow_updating_a_preexisting_peer() { + #[tokio::test] + async fn it_should_allow_updating_a_preexisting_peer() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased), Some(Arc::new(peer))); + assert_eq!( + swarm.upsert_peer(peer.into(), &mut downloads_increased).await, + Some(Arc::new(peer)) + ); } - #[test] - fn it_should_allow_getting_all_peers() { + #[tokio::test] + async fn it_should_allow_getting_all_peers() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.peers(None), [Arc::new(peer)]); } - #[test] - fn it_should_allow_getting_one_peer_by_id() { + #[tokio::test] + async fn it_should_allow_getting_one_peer_by_id() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - 
swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); } - #[test] - fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { + #[tokio::test] + async fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.len(), 1); } - #[test] - fn it_should_decrease_the_number_of_peers_after_removing_one() { + #[tokio::test] + async fn it_should_decrease_the_number_of_peers_after_removing_one() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - swarm.remove(&peer); + swarm.remove(&peer).await; assert!(swarm.is_empty()); } - #[test] - fn it_should_allow_removing_an_existing_peer() { + #[tokio::test] + async fn it_should_allow_removing_an_existing_peer() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - let old = swarm.remove(&peer); + let old = swarm.remove(&peer).await; assert_eq!(old, Some(Arc::new(peer))); assert_eq!(swarm.get(&peer.peer_addr), None); } - #[test] - fn it_should_allow_removing_a_non_existing_peer() { + #[tokio::test] + async fn it_should_allow_removing_a_non_existing_peer() { let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - assert_eq!(swarm.remove(&peer), None); + assert_eq!(swarm.remove(&peer).await, None); } - #[test] - fn 
it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { + #[tokio::test] + async fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { let mut swarm = Swarm::default(); let mut downloads_increased = false; @@ -351,19 +408,19 @@ mod tests { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased); + swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased); + swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; assert_eq!(swarm.peers_excluding(&peer2.peer_addr, None), [Arc::new(peer1)]); } - #[test] - fn it_should_remove_inactive_peers() { + #[tokio::test] + async fn it_should_remove_inactive_peers() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let one_second = DurationSinceUnixEpoch::new(1, 0); @@ -371,7 +428,7 @@ mod tests { // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; // Remove peers not updated since one second after inserting the peer swarm.remove_inactive(last_update_time + one_second); @@ -379,8 +436,8 @@ mod tests { assert_eq!(swarm.len(), 0); } - #[test] - fn it_should_not_remove_active_peers() { + #[tokio::test] + async fn it_should_not_remove_active_peers() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let one_second = DurationSinceUnixEpoch::new(1, 0); @@ -388,7 +445,7 @@ mod tests { // Insert the peer let last_update_time 
= DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; // Remove peers not updated since one second before inserting the peer. swarm.remove_inactive(last_update_time - one_second); @@ -407,23 +464,23 @@ mod tests { Swarm::default() } - fn not_empty_swarm() -> Swarm { + async fn not_empty_swarm() -> Swarm { let mut swarm = Swarm::default(); - swarm.upsert_peer(PeerBuilder::default().build().into(), &mut false); + swarm.upsert_peer(PeerBuilder::default().build().into(), &mut false).await; swarm } - fn not_empty_swarm_with_downloads() -> Swarm { + async fn not_empty_swarm_with_downloads() -> Swarm { let mut swarm = Swarm::default(); let mut peer = PeerBuilder::leecher().build(); let mut downloads_increased = false; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert!(swarm.metadata().downloads() > 0); @@ -457,13 +514,13 @@ mod tests { assert!(empty_swarm().should_be_removed(&remove_peerless_torrents_policy())); } - #[test] - fn it_should_not_be_removed_is_the_swarm_is_not_empty() { - assert!(!not_empty_swarm().should_be_removed(&remove_peerless_torrents_policy())); + #[tokio::test] + async fn it_should_not_be_removed_is_the_swarm_is_not_empty() { + assert!(!not_empty_swarm().await.should_be_removed(&remove_peerless_torrents_policy())); } - #[test] - fn it_should_not_be_removed_even_if_the_swarm_is_empty_if_we_need_to_track_stats_for_downloads_and_there_has_been_downloads( + #[tokio::test] + async fn 
it_should_not_be_removed_even_if_the_swarm_is_empty_if_we_need_to_track_stats_for_downloads_and_there_has_been_downloads( ) { let policy = TrackerPolicy { remove_peerless_torrents: true, @@ -471,7 +528,7 @@ mod tests { ..Default::default() }; - assert!(!not_empty_swarm_with_downloads().should_be_removed(&policy)); + assert!(!not_empty_swarm_with_downloads().await.should_be_removed(&policy)); } } @@ -486,33 +543,35 @@ mod tests { assert!(!empty_swarm().should_be_removed(&don_not_remove_peerless_torrents_policy())); } - #[test] - fn it_should_not_be_removed_is_the_swarm_is_not_empty() { - assert!(!not_empty_swarm().should_be_removed(&don_not_remove_peerless_torrents_policy())); + #[tokio::test] + async fn it_should_not_be_removed_is_the_swarm_is_not_empty() { + assert!(!not_empty_swarm() + .await + .should_be_removed(&don_not_remove_peerless_torrents_policy())); } } } - #[test] - fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { + #[tokio::test] + async fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer1 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased); + swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; let peer2 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased); + swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; assert_eq!(swarm.len(), 2); } - #[test] - fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { + #[tokio::test] + async fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { let mut swarm = Swarm::default(); let mut downloads_increased = false; @@ 
-523,27 +582,27 @@ mod tests { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased); + swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased); + swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; assert_eq!(swarm.len(), 1); } - #[test] - fn it_should_return_the_metadata() { + #[tokio::test] + async fn it_should_return_the_metadata() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; assert_eq!( swarm.metadata(), @@ -555,32 +614,32 @@ mod tests { ); } - #[test] - fn it_should_return_the_number_of_seeders_in_the_list() { + #[tokio::test] + async fn it_should_return_the_number_of_seeders_in_the_list() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; let (seeders, _leechers) = swarm.seeders_and_leechers(); assert_eq!(seeders, 1); } - #[test] - fn it_should_return_the_number_of_leechers_in_the_list() { + #[tokio::test] + async fn 
it_should_return_the_number_of_leechers_in_the_list() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; let (_seeders, leechers) = swarm.seeders_and_leechers(); @@ -594,8 +653,8 @@ mod tests { use crate::swarm::Swarm; - #[test] - fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { + #[tokio::test] + async fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { let mut swarm = Swarm::default(); let mut downloads_increased = false; @@ -603,13 +662,13 @@ mod tests { let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().leechers(), leechers + 1); } - #[test] - fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { + #[tokio::test] + async fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { let mut swarm = Swarm::default(); let mut downloads_increased = false; @@ -617,13 +676,13 @@ mod tests { let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().seeders(), seeders + 1); } - #[test] - fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( + #[tokio::test] + async fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( ) { let mut swarm = Swarm::default(); let mut 
downloads_increased = false; @@ -632,7 +691,7 @@ mod tests { let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().downloads(), downloads); } @@ -643,34 +702,34 @@ mod tests { use crate::swarm::Swarm; - #[test] - fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { + #[tokio::test] + async fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; let leechers = swarm.metadata().leechers(); - swarm.remove(&leecher); + swarm.remove(&leecher).await; assert_eq!(swarm.metadata().leechers(), leechers - 1); } - #[test] - fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { + #[tokio::test] + async fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; let seeders = swarm.metadata().seeders(); - swarm.remove(&seeder); + swarm.remove(&seeder).await; assert_eq!(swarm.metadata().seeders(), seeders - 1); } @@ -683,14 +742,14 @@ mod tests { use crate::swarm::Swarm; - #[test] - fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { + #[tokio::test] + async fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + 
swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; let leechers = swarm.metadata().leechers(); @@ -699,14 +758,14 @@ mod tests { assert_eq!(swarm.metadata().leechers(), leechers - 1); } - #[test] - fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { + #[tokio::test] + async fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; let seeders = swarm.metadata().seeders(); @@ -722,80 +781,80 @@ mod tests { use crate::swarm::Swarm; - #[test] - fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { + #[tokio::test] + async fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(0); // Convert to seeder - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().seeders(), seeders + 1); assert_eq!(swarm.metadata().leechers(), leechers - 1); } - #[test] - fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { + #[tokio::test] + async fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let mut peer = PeerBuilder::seeder().build(); - 
swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(10); // Convert to leecher - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().leechers(), leechers + 1); assert_eq!(swarm.metadata().seeders(), seeders - 1); } - #[test] - fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { + #[tokio::test] + async fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().downloads(), downloads + 1); } - #[test] - fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { + #[tokio::test] + async fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut 
downloads_increased).await; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().downloads(), downloads + 1); } diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index d92e1755a..277a85cc2 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -1,7 +1,8 @@ -use std::sync::{Arc, Mutex}; +use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use crossbeam_skiplist::SkipMap; +use tokio::sync::Mutex; use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; @@ -48,6 +49,7 @@ impl Swarms { /// # Errors /// /// This function panics if the lock for the swarm handle cannot be acquired. + #[allow(clippy::await_holding_lock)] pub async fn handle_announcement( &self, info_hash: &InfoHash, @@ -57,7 +59,7 @@ impl Swarms { let swarm_handle = match self.swarms.get(info_hash) { None => { let new_swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { - SwarmHandle::new(Swarm::new(number_of_downloads).into()) + SwarmHandle::new(Swarm::new(number_of_downloads, self.event_sender.clone()).into()) } else { SwarmHandle::default() }; @@ -78,9 +80,11 @@ impl Swarms { Some(existing_swarm_handle) => existing_swarm_handle, }; - let mut swarm = swarm_handle.value().lock()?; + let mut swarm = swarm_handle.value().lock().await; - Ok(swarm.handle_announcement(peer)) + let downloads_increased = swarm.handle_announcement(peer).await; + + Ok(downloads_increased) } /// Inserts a new swarm. Only used for testing purposes. @@ -162,11 +166,11 @@ impl Swarms { /// # Errors /// /// This function panics if the lock for the swarm handle cannot be acquired. 
- pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Result, Error> { + pub async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Result, Error> { match self.swarms.get(info_hash) { None => Ok(None), Some(swarm_handle) => { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; Ok(Some(swarm.metadata())) } } @@ -183,8 +187,8 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for the /// swarm handle. - pub fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> Result { - match self.get_swarm_metadata(info_hash) { + pub async fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> Result { + match self.get_swarm_metadata(info_hash).await { Ok(Some(swarm_metadata)) => Ok(swarm_metadata), Ok(None) => Ok(SwarmMetadata::zeroed()), Err(err) => Err(err), @@ -207,7 +211,7 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for the /// swarm handle. - pub fn get_peers_peers_excluding( + pub async fn get_peers_peers_excluding( &self, info_hash: &InfoHash, peer: &peer::Peer, @@ -216,7 +220,7 @@ impl Swarms { match self.get(info_hash) { None => Ok(vec![]), Some(swarm_handle) => { - let swarm = swarm_handle.lock()?; + let swarm = swarm_handle.lock().await; Ok(swarm.peers_excluding(&peer.peer_addr, Some(limit))) } } @@ -236,11 +240,11 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for the /// swarm handle. - pub fn get_swarm_peers(&self, info_hash: &InfoHash, limit: usize) -> Result>, Error> { + pub async fn get_swarm_peers(&self, info_hash: &InfoHash, limit: usize) -> Result>, Error> { match self.get(info_hash) { None => Ok(vec![]), Some(swarm_handle) => { - let swarm = swarm_handle.lock()?; + let swarm = swarm_handle.lock().await; Ok(swarm.peers(Some(limit))) } } @@ -255,7 +259,7 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. 
- pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result { + pub async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result { tracing::info!( "Removing inactive peers since: {:?} ...", convert_from_timestamp_to_datetime_utc(current_cutoff) @@ -264,7 +268,7 @@ impl Swarms { let mut inactive_peers_removed = 0; for swarm_handle in &self.swarms { - let mut swarm = swarm_handle.value().lock()?; + let mut swarm = swarm_handle.value().lock().await; let removed = swarm.remove_inactive(current_cutoff); inactive_peers_removed += removed; } @@ -283,13 +287,13 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. - pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> Result { + pub async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> Result { tracing::info!("Removing peerless torrents ..."); let mut peerless_torrents_removed = 0; for swarm_handle in &self.swarms { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; if swarm.meets_retaining_policy(policy) { continue; @@ -320,7 +324,7 @@ impl Swarms { continue; } - let entry = SwarmHandle::new(Swarm::new(*completed).into()); + let entry = SwarmHandle::new(Swarm::new(*completed, self.event_sender.clone()).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. @@ -348,11 +352,11 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. 
- pub fn get_aggregate_swarm_metadata(&self) -> Result { + pub async fn get_aggregate_swarm_metadata(&self) -> Result { let mut metrics = AggregateSwarmMetadata::default(); for swarm_handle in &self.swarms { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; let stats = swarm.metadata(); @@ -376,11 +380,11 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. - pub fn count_peerless_torrents(&self) -> Result { + pub async fn count_peerless_torrents(&self) -> Result { let mut peerless_torrents = 0; for swarm_handle in &self.swarms { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; if swarm.is_peerless() { peerless_torrents += 1; @@ -400,11 +404,11 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. - pub fn count_peers(&self) -> Result { + pub async fn count_peers(&self) -> Result { let mut peers = 0; for swarm_handle in &self.swarms { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; peers += swarm.len(); } @@ -424,16 +428,7 @@ impl Swarms { } #[derive(thiserror::Error, Debug, Clone)] -pub enum Error { - #[error("Can't acquire swarm lock")] - CannotAcquireSwarmLock, -} - -impl From>> for Error { - fn from(_error: std::sync::PoisonError>) -> Self { - Error::CannotAcquireSwarmLock - } -} +pub enum Error {} #[cfg(test)] mod tests { @@ -523,7 +518,7 @@ mod tests { swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); - let peers = swarms.get_swarm_peers(&info_hash, 74).unwrap(); + let peers = swarms.get_swarm_peers(&info_hash, 74).await.unwrap(); assert_eq!(peers, vec![Arc::new(peer)]); } @@ -532,7 +527,7 @@ mod tests { async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { let swarms = Arc::new(Swarms::default()); - let peers = swarms.get_swarm_peers(&sample_info_hash(), 
74).unwrap(); + let peers = swarms.get_swarm_peers(&sample_info_hash(), 74).await.unwrap(); assert!(peers.is_empty()); } @@ -557,7 +552,7 @@ mod tests { swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); } - let peers = swarms.get_swarm_peers(&info_hash, 74).unwrap(); + let peers = swarms.get_swarm_peers(&info_hash, 74).await.unwrap(); assert_eq!(peers.len(), 74); } @@ -582,6 +577,7 @@ mod tests { let peers = swarms .get_peers_peers_excluding(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT) + .await .unwrap(); assert_eq!(peers, vec![]); @@ -598,6 +594,7 @@ mod tests { let peers = swarms .get_peers_peers_excluding(&info_hash, &peer, TORRENT_PEERS_LIMIT) + .await .unwrap(); assert_eq!(peers, vec![]); @@ -630,6 +627,7 @@ mod tests { let peers = swarms .get_peers_peers_excluding(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT) + .await .unwrap(); assert_eq!(peers.len(), 74); @@ -675,9 +673,14 @@ mod tests { // Cut off time is 1 second after the peer was updated swarms .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .await .unwrap(); - assert!(!swarms.get_swarm_peers(&info_hash, 74).unwrap().contains(&Arc::new(peer))); + assert!(!swarms + .get_swarm_peers(&info_hash, 74) + .await + .unwrap() + .contains(&Arc::new(peer))); } async fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { @@ -691,6 +694,7 @@ mod tests { // Remove the peer swarms .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .await .unwrap(); swarms @@ -707,7 +711,7 @@ mod tests { ..Default::default() }; - swarms.remove_peerless_torrents(&tracker_policy).unwrap(); + swarms.remove_peerless_torrents(&tracker_policy).await.unwrap(); assert!(swarms.get(&info_hash).is_none()); } @@ -721,7 +725,7 @@ mod tests { use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; - use crate::{LockTrackedTorrent, SwarmHandle}; + use crate::{Swarm, SwarmHandle}; /// `TorrentEntry` data is not directly 
accessible. It's only /// accessible through the trait methods. We need this temporary @@ -733,19 +737,19 @@ mod tests { number_of_peers: usize, } + async fn torrent_entry_info(swarm_handle: SwarmHandle) -> TorrentEntryInfo { + let torrent_guard = swarm_handle.lock().await; + torrent_guard.clone().into() + } + #[allow(clippy::from_over_into)] - impl Into for SwarmHandle { + impl Into for Swarm { fn into(self) -> TorrentEntryInfo { - let torrent_guard = self.lock_or_panic(); - let torrent_entry_info = TorrentEntryInfo { - swarm_metadata: torrent_guard.metadata(), - peers: torrent_guard.peers(None).iter().map(|peer| *peer.clone()).collect(), - number_of_peers: torrent_guard.len(), + swarm_metadata: self.metadata(), + peers: self.peers(None).iter().map(|peer| *peer.clone()).collect(), + number_of_peers: self.len(), }; - - drop(torrent_guard); - torrent_entry_info } } @@ -759,7 +763,7 @@ mod tests { swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); - let torrent_entry = swarms.get(&info_hash).unwrap(); + let torrent_entry_info = torrent_entry_info(swarms.get(&info_hash).unwrap()).await; assert_eq!( TorrentEntryInfo { @@ -771,7 +775,7 @@ mod tests { peers: vec!(peer), number_of_peers: 1 }, - torrent_entry.into() + torrent_entry_info ); } @@ -780,7 +784,9 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::{ + torrent_entry_info, TorrentEntryInfo, + }; use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; @@ -796,7 +802,7 @@ mod tests { assert_eq!(torrent_entries.len(), 1); - let torrent_entry = torrent_entries.first().unwrap().1.clone(); + let torrent_entry = torrent_entry_info(torrent_entries.first().unwrap().1.clone()).await; assert_eq!( TorrentEntryInfo { @@ -808,7 +814,7 @@ mod tests { peers: vec!(peer), number_of_peers: 1 }, - 
torrent_entry.into() + torrent_entry ); } @@ -818,7 +824,9 @@ mod tests { use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::{ + torrent_entry_info, TorrentEntryInfo, + }; use crate::swarms::Swarms; use crate::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, @@ -844,7 +852,7 @@ mod tests { assert_eq!(torrent_entries.len(), 1); - let torrent_entry = torrent_entries.first().unwrap().1.clone(); + let torrent_entry_info = torrent_entry_info(torrent_entries.first().unwrap().1.clone()).await; assert_eq!( TorrentEntryInfo { @@ -856,7 +864,7 @@ mod tests { peers: vec!(peer_one), number_of_peers: 1 }, - torrent_entry.into() + torrent_entry_info ); } @@ -879,7 +887,7 @@ mod tests { assert_eq!(torrent_entries.len(), 1); - let torrent_entry = torrent_entries.first().unwrap().1.clone(); + let torrent_entry_info = torrent_entry_info(torrent_entries.first().unwrap().1.clone()).await; assert_eq!( TorrentEntryInfo { @@ -891,7 +899,7 @@ mod tests { peers: vec!(peer_two), number_of_peers: 1 }, - torrent_entry.into() + torrent_entry_info ); } @@ -934,7 +942,7 @@ mod tests { async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { let swarms = Arc::new(Swarms::default()); - let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -956,7 +964,7 @@ mod tests { .await .unwrap(); - let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -978,7 +986,7 @@ mod tests { .await .unwrap(); 
- let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -1000,7 +1008,7 @@ mod tests { .await .unwrap(); - let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -1027,7 +1035,7 @@ mod tests { let result_a = start_time.elapsed(); let start_time = std::time::Instant::now(); - let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); let result_b = start_time.elapsed(); assert_eq!( @@ -1060,7 +1068,7 @@ mod tests { swarms.handle_announcement(&infohash, &leecher(), None).await.unwrap(); - let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap(); assert_eq!( swarm_metadata, @@ -1076,7 +1084,7 @@ mod tests { async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { let swarms = Arc::new(Swarms::default()); - let swarm_metadata = swarms.get_swarm_metadata_or_default(&sample_info_hash()).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&sample_info_hash()).await.unwrap(); assert_eq!(swarm_metadata, SwarmMetadata::zeroed()); } @@ -1103,7 +1111,7 @@ mod tests { swarms.import_persistent(&persistent_torrents); - let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap(); // Only the number of downloads is persisted. 
assert_eq!(swarm_metadata.downloaded, 1); diff --git a/packages/torrent-repository/tests/swarm/mod.rs b/packages/torrent-repository/tests/swarm/mod.rs index d529b0243..1f5d0b737 100644 --- a/packages/torrent-repository/tests/swarm/mod.rs +++ b/packages/torrent-repository/tests/swarm/mod.rs @@ -47,39 +47,39 @@ pub enum Makes { Three, } -fn make(swarm: &mut Swarm, makes: &Makes) -> Vec { +async fn make(swarm: &mut Swarm, makes: &Makes) -> Vec { match makes { Makes::Empty => vec![], Makes::Started => { let peer = a_started_peer(1); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; vec![peer] } Makes::Completed => { let peer = a_completed_peer(2); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; vec![peer] } Makes::Downloaded => { let mut peer = a_started_peer(3); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; vec![peer] } Makes::Three => { let peer_1 = a_started_peer(1); - swarm.handle_announcement(&peer_1); + swarm.handle_announcement(&peer_1).await; let peer_2 = a_completed_peer(2); - swarm.handle_announcement(&peer_2); + swarm.handle_announcement(&peer_2).await; let mut peer_3 = a_started_peer(3); - swarm.handle_announcement(&peer_3); + swarm.handle_announcement(&peer_3).await; peer_3.event = AnnounceEvent::Completed; peer_3.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer_3); + swarm.handle_announcement(&peer_3).await; vec![peer_1, peer_2, peer_3] } } @@ -89,7 +89,7 @@ fn make(swarm: &mut Swarm, makes: &Makes) -> Vec { #[case::empty(&Makes::Empty)] #[tokio::test] async fn it_should_be_empty_by_default(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; assert_eq!(swarm.len(), 0); } @@ -106,7 +106,7 @@ async fn 
it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy #[case] makes: &Makes, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; let has_peers = !swarm.is_empty(); let has_downloads = swarm.metadata().downloaded != 0; @@ -140,7 +140,7 @@ async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_get_peers_for_torrent_entry(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes); + let peers = make(&mut swarm, makes).await; let torrent_peers = swarm.peers(None); @@ -159,11 +159,11 @@ async fn it_should_get_peers_for_torrent_entry(#[values(swarm())] mut swarm: Swa #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_update_a_peer(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; // Make and insert a new peer. let mut peer = a_started_peer(-1); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; // Get the Inserted Peer by Id. let peers = swarm.peers(None); @@ -176,7 +176,7 @@ async fn it_should_update_a_peer(#[values(swarm())] mut swarm: Swarm, #[case] ma // Announce "Completed" torrent download event. peer.event = AnnounceEvent::Completed; - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; // Get the Updated Peer by Id. 
let peers = swarm.peers(None); @@ -198,11 +198,11 @@ async fn it_should_update_a_peer(#[values(swarm())] mut swarm: Swarm, #[case] ma async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { use torrust_tracker_primitives::peer::ReadInfo as _; - make(&mut swarm, makes); + make(&mut swarm, makes).await; let mut peer = a_started_peer(-1); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; // The started peer should be inserted. let peers = swarm.peers(None); @@ -215,7 +215,7 @@ async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(swarm())] mu // Change peer to "Stopped" and insert. peer.event = AnnounceEvent::Stopped; - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; // It should be removed now. let peers = swarm.peers(None); @@ -237,7 +237,7 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade #[values(swarm())] mut torrent: Swarm, #[case] makes: &Makes, ) { - make(&mut torrent, makes); + make(&mut torrent, makes).await; let downloaded = torrent.metadata().downloaded; let peers = torrent.peers(None); @@ -248,7 +248,7 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade // Announce "Completed" torrent download event. 
peer.event = AnnounceEvent::Completed; - torrent.handle_announcement(&peer); + torrent.handle_announcement(&peer).await; let stats = torrent.metadata(); if is_already_completed { @@ -265,7 +265,7 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_update_a_peer_as_a_seeder(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes); + let peers = make(&mut swarm, makes).await; let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); let peers = swarm.peers(None); @@ -275,7 +275,7 @@ async fn it_should_update_a_peer_as_a_seeder(#[values(swarm())] mut swarm: Swarm // Set Bytes Left to Zero peer.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; let stats = swarm.metadata(); if is_already_non_left { @@ -294,7 +294,7 @@ async fn it_should_update_a_peer_as_a_seeder(#[values(swarm())] mut swarm: Swarm #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_update_a_peer_as_incomplete(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes); + let peers = make(&mut swarm, makes).await; let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); let peers = swarm.peers(None); @@ -304,7 +304,7 @@ async fn it_should_update_a_peer_as_incomplete(#[values(swarm())] mut swarm: Swa // Set Bytes Left to no Zero peer.left = NumberOfBytes::new(1); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; let stats = swarm.metadata(); if completed_already { @@ -323,7 +323,7 @@ async fn it_should_update_a_peer_as_incomplete(#[values(swarm())] mut swarm: Swa #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_get_peers_excluding_the_client_socket(#[values(swarm())] mut swarm: Swarm, #[case] makes: 
&Makes) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; let peers = swarm.peers(None); let mut peer = **peers.first().expect("there should be a peer"); @@ -338,7 +338,7 @@ async fn it_should_get_peers_excluding_the_client_socket(#[values(swarm())] mut // set the address to the socket. peer.peer_addr = socket; - swarm.handle_announcement(&peer); // Add peer + swarm.handle_announcement(&peer).await; // Add peer // It should not include the peer that has the same socket. assert!(!swarm.peers_excluding(&socket, None).contains(&peer.into())); @@ -352,12 +352,12 @@ async fn it_should_get_peers_excluding_the_client_socket(#[values(swarm())] mut #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_limit_the_number_of_peers_returned(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { let peer = a_started_peer(peer_number); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; } let peers = swarm.peers(Some(TORRENT_PEERS_LIMIT)); @@ -376,7 +376,7 @@ async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(swarm())] mut sw const TIMEOUT: Duration = Duration::from_secs(120); const EXPIRE: Duration = Duration::from_secs(121); - let peers = make(&mut swarm, makes); + let peers = make(&mut swarm, makes).await; let mut peer = a_completed_peer(-1); @@ -385,7 +385,7 @@ async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(swarm())] mut sw peer.updated = now.sub(EXPIRE); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; assert_eq!(swarm.len(), peers.len() + 1); diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs index 975457cca..d8ee354c8 100644 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -3,13 +3,14 @@ use 
std::hash::{DefaultHasher, Hash, Hasher}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use bittorrent_primitives::info_hash::InfoHash; +use futures::future::join_all; use rstest::{fixture, rstest}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; use torrust_tracker_torrent_repository::swarm::Swarm; -use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; +use torrust_tracker_torrent_repository::Swarms; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -31,49 +32,49 @@ fn default() -> Entries { } #[fixture] -fn started() -> Entries { +async fn started() -> Entries { let mut swarm = Swarm::default(); - swarm.handle_announcement(&a_started_peer(1)); + swarm.handle_announcement(&a_started_peer(1)).await; vec![(InfoHash::default(), swarm)] } #[fixture] -fn completed() -> Entries { +async fn completed() -> Entries { let mut swarm = Swarm::default(); - swarm.handle_announcement(&a_completed_peer(2)); + swarm.handle_announcement(&a_completed_peer(2)).await; vec![(InfoHash::default(), swarm)] } #[fixture] -fn downloaded() -> Entries { +async fn downloaded() -> Entries { let mut swarm = Swarm::default(); let mut peer = a_started_peer(3); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; vec![(InfoHash::default(), swarm)] } #[fixture] -fn three() -> Entries { +async fn three() -> Entries { let mut started = Swarm::default(); let started_h = &mut DefaultHasher::default(); - started.handle_announcement(&a_started_peer(1)); + started.handle_announcement(&a_started_peer(1)).await; started.hash(started_h); let mut completed = Swarm::default(); let completed_h = &mut 
DefaultHasher::default(); - completed.handle_announcement(&a_completed_peer(2)); + completed.handle_announcement(&a_completed_peer(2)).await; completed.hash(completed_h); let mut downloaded = Swarm::default(); let downloaded_h = &mut DefaultHasher::default(); let mut downloaded_peer = a_started_peer(3); - downloaded.handle_announcement(&downloaded_peer); + downloaded.handle_announcement(&downloaded_peer).await; downloaded_peer.event = AnnounceEvent::Completed; downloaded_peer.left = NumberOfBytes::new(0); - downloaded.handle_announcement(&downloaded_peer); + downloaded.handle_announcement(&downloaded_peer).await; downloaded.hash(downloaded_h); vec![ @@ -84,12 +85,12 @@ fn three() -> Entries { } #[fixture] -fn many_out_of_order() -> Entries { +async fn many_out_of_order() -> Entries { let mut entries: HashSet<(InfoHash, Swarm)> = HashSet::default(); for i in 0..408 { let mut entry = Swarm::default(); - entry.handle_announcement(&a_started_peer(i)); + entry.handle_announcement(&a_started_peer(i)).await; entries.insert((InfoHash::from(&i), entry)); } @@ -99,12 +100,12 @@ fn many_out_of_order() -> Entries { } #[fixture] -fn many_hashed_in_order() -> Entries { +async fn many_hashed_in_order() -> Entries { let mut entries: BTreeMap = BTreeMap::default(); for i in 0..408 { let mut entry = Swarm::default(); - entry.handle_announcement(&a_started_peer(i)); + entry.handle_announcement(&a_started_peer(i)).await; let hash: &mut DefaultHasher = &mut DefaultHasher::default(); hash.write_i32(i); @@ -191,21 +192,18 @@ fn policy_remove_persist() -> TrackerPolicy { #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] 
+#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_get_a_torrent_entry(#[values(swarms())] repo: Swarms, #[case] entries: Entries) { make(&repo, &entries); if let Some((info_hash, swarm)) = entries.first() { - assert_eq!( - Some(repo.get(info_hash).unwrap().lock_or_panic().clone()), - Some(swarm.clone()) - ); + assert_eq!(Some(repo.get(info_hash).unwrap().lock().await.clone()), Some(swarm.clone())); } else { assert!(repo.get(&InfoHash::default()).is_none()); } @@ -214,23 +212,23 @@ async fn it_should_get_a_torrent_entry(#[values(swarms())] repo: Swarms, #[case] #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( #[values(swarms())] repo: Swarms, #[case] entries: Entries, - many_out_of_order: Entries, + #[future] many_out_of_order: Entries, ) { make(&repo, &entries); let entries_a = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); - make(&repo, &many_out_of_order); + make(&repo, &many_out_of_order.await); let entries_b = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); @@ -247,12 +245,12 @@ async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] 
-#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_get_paginated( #[values(swarms())] repo: Swarms, @@ -267,11 +265,15 @@ async fn it_should_get_paginated( match paginated { // it should return empty if limit is zero. Pagination { limit: 0, .. } => { - let swarms: Vec<(InfoHash, Swarm)> = repo - .get_paginated(Some(&paginated)) - .iter() - .map(|(i, swarm_handle)| (*i, swarm_handle.lock_or_panic().clone())) - .collect(); + let page = repo.get_paginated(Some(&paginated)); + + let futures = page.iter().map(|(i, swarm_handle)| { + let i = *i; + let swarm_handle = swarm_handle.clone(); + async move { (i, swarm_handle.lock().await.clone()) } + }); + + let swarms: Vec<(InfoHash, Swarm)> = join_all(futures).await; assert_eq!(swarms, vec![]); } @@ -287,7 +289,7 @@ async fn it_should_get_paginated( } } - // it should return the only the second entry if both the limit and the offset are one. + // it should return only the second entry if both the limit and the offset are one. Pagination { limit: 1, offset: 1 } => { if info_hashes.len() > 1 { let page = repo.get_paginated(Some(&paginated)); @@ -295,7 +297,7 @@ async fn it_should_get_paginated( assert_eq!(page[0].0, info_hashes[1]); } } - // the other cases are not yet tested. 
+ _ => {} } } @@ -303,12 +305,12 @@ async fn it_should_get_paginated( #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_get_metrics(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; @@ -326,18 +328,18 @@ async fn it_should_get_metrics(#[values(swarms())] swarms: Swarms, #[case] entri metrics.total_downloaded += u64::from(stats.downloaded); } - assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap(), metrics); + assert_eq!(swarms.get_aggregate_swarm_metadata().await.unwrap(), metrics); } #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_import_persistent_torrents( #[values(swarms())] swarms: Swarms, @@ -346,12 +348,15 @@ async fn it_should_import_persistent_torrents( ) { make(&swarms, &entries); - let mut downloaded = swarms.get_aggregate_swarm_metadata().unwrap().total_downloaded; + let mut downloaded = swarms.get_aggregate_swarm_metadata().await.unwrap().total_downloaded; 
persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); swarms.import_persistent(&persistent_torrents); - assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap().total_downloaded, downloaded); + assert_eq!( + swarms.get_aggregate_swarm_metadata().await.unwrap().total_downloaded, + downloaded + ); for (entry, _) in persistent_torrents { assert!(swarms.get(&entry).is_some()); @@ -361,23 +366,23 @@ async fn it_should_import_persistent_torrents( #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_remove_an_entry(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { make(&swarms, &entries); for (info_hash, torrent) in entries { assert_eq!( - Some(swarms.get(&info_hash).unwrap().lock_or_panic().clone()), + Some(swarms.get(&info_hash).unwrap().lock().await.clone()), Some(torrent.clone()) ); assert_eq!( - Some(swarms.remove(&info_hash).await.unwrap().lock_or_panic().clone()), + Some(swarms.remove(&info_hash).await.unwrap().lock().await.clone()), Some(torrent) ); @@ -385,18 +390,18 @@ async fn it_should_remove_an_entry(#[values(swarms())] swarms: Swarms, #[case] e assert!(swarms.remove(&info_hash).await.is_none()); } - assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, 0); + assert_eq!(swarms.get_aggregate_swarm_metadata().await.unwrap().total_torrents, 0); } #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] 
-#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { use std::ops::Sub as _; @@ -437,7 +442,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c { swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); assert_eq!( - swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, + swarms.get_aggregate_swarm_metadata().await.unwrap().total_torrents, entries.len() as u64 + 1 ); } @@ -446,7 +451,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // and verify the swarm metadata was updated. { swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); - let stats = swarms.get_swarm_metadata(&info_hash).unwrap(); + let stats = swarms.get_swarm_metadata(&info_hash).await.unwrap(); assert_eq!( stats, Some(SwarmMetadata { @@ -460,7 +465,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Verify that this new peer was inserted into the repository. { let lock_tracked_torrent = swarms.get(&info_hash).expect("it_should_get_some"); - let entry = lock_tracked_torrent.lock_or_panic(); + let entry = lock_tracked_torrent.lock().await; assert!(entry.peers(None).contains(&peer.into())); } @@ -468,13 +473,14 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c { swarms .remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")) + .await .unwrap(); } // Verify that the this peer was removed from the repository. 
{ let lock_tracked_torrent = swarms.get(&info_hash).expect("it_should_get_some"); - let entry = lock_tracked_torrent.lock_or_panic(); + let entry = lock_tracked_torrent.lock().await; assert!(!entry.peers(None).contains(&peer.into())); } } @@ -482,12 +488,12 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_remove_peerless_torrents( #[values(swarms())] swarms: Swarms, @@ -496,13 +502,17 @@ async fn it_should_remove_peerless_torrents( ) { make(&swarms, &entries); - swarms.remove_peerless_torrents(&policy).unwrap(); + swarms.remove_peerless_torrents(&policy).await.unwrap(); + + let paginated = swarms.get_paginated(None); // ← store the result in a named variable + + let futures = paginated.iter().map(|(i, swarm_handle)| { + let i = *i; + let swarm_handle = swarm_handle.clone(); + async move { (i, swarm_handle.lock().await.clone()) } + }); - let torrents: Vec<(InfoHash, Swarm)> = swarms - .get_paginated(None) - .iter() - .map(|(i, lock_tracked_torrent)| (*i, lock_tracked_torrent.lock_or_panic().clone())) - .collect(); + let torrents: Vec<(InfoHash, Swarm)> = join_all(futures).await; for (_, entry) in torrents { assert!(entry.meets_retaining_policy(&policy)); diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 00d42174a..a2e8db743 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ 
b/packages/tracker-core/src/announce_handler.rs @@ -180,16 +180,20 @@ impl AnnounceHandler { self.db_torrent_repository.increase_number_of_downloads(info_hash)?; } - Ok(self.build_announce_data(info_hash, peer, peers_wanted)) + Ok(self.build_announce_data(info_hash, peer, peers_wanted).await) } /// Builds the announce data for the peer making the request. - fn build_announce_data(&self, info_hash: &InfoHash, peer: &peer::Peer, peers_wanted: &PeersWanted) -> AnnounceData { + async fn build_announce_data(&self, info_hash: &InfoHash, peer: &peer::Peer, peers_wanted: &PeersWanted) -> AnnounceData { let peers = self .in_memory_torrent_repository - .get_peers_for(info_hash, peer, peers_wanted.limit()); + .get_peers_for(info_hash, peer, peers_wanted.limit()) + .await; - let swarm_metadata = self.in_memory_torrent_repository.get_swarm_metadata_or_default(info_hash); + let swarm_metadata = self + .in_memory_torrent_repository + .get_swarm_metadata_or_default(info_hash) + .await; AnnounceData { peers, @@ -595,7 +599,7 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_test_helpers::configuration; - use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; + use torrust_tracker_torrent_repository::Swarms; use crate::announce_handler::tests::the_announce_handler::peer_ip; use crate::announce_handler::{AnnounceHandler, PeersWanted}; @@ -659,10 +663,10 @@ mod tests { .expect("it should be able to get entry"); // It persists the number of completed peers. 
- assert_eq!(torrent_entry.lock_or_panic().metadata().downloaded, 1); + assert_eq!(torrent_entry.lock().await.metadata().downloaded, 1); // It does not persist the peers - assert!(torrent_entry.lock_or_panic().is_empty()); + assert!(torrent_entry.lock().await.is_empty()); } } diff --git a/packages/tracker-core/src/scrape_handler.rs b/packages/tracker-core/src/scrape_handler.rs index 5d78c7d90..443d989a6 100644 --- a/packages/tracker-core/src/scrape_handler.rs +++ b/packages/tracker-core/src/scrape_handler.rs @@ -112,7 +112,11 @@ impl ScrapeHandler { for info_hash in info_hashes { let swarm_metadata = match self.whitelist_authorization.authorize(info_hash).await { - Ok(()) => self.in_memory_torrent_repository.get_swarm_metadata_or_default(info_hash), + Ok(()) => { + self.in_memory_torrent_repository + .get_swarm_metadata_or_default(info_hash) + .await + } Err(_) => SwarmMetadata::zeroed(), }; scrape_data.add_file(info_hash, swarm_metadata); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index dec52daac..bc193bd4f 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -90,35 +90,36 @@ impl TorrentsManager { /// 2. If the tracker is configured to remove peerless torrents /// (`remove_peerless_torrents` is set), it removes entire torrent /// entries that have no active peers. 
- pub fn cleanup_torrents(&self) { - self.log_aggregate_swarm_metadata(); + pub async fn cleanup_torrents(&self) { + self.log_aggregate_swarm_metadata().await; - self.remove_inactive_peers(); + self.remove_inactive_peers().await; - self.log_aggregate_swarm_metadata(); + self.log_aggregate_swarm_metadata().await; - self.remove_peerless_torrents(); + self.remove_peerless_torrents().await; - self.log_aggregate_swarm_metadata(); + self.log_aggregate_swarm_metadata().await; } - fn remove_inactive_peers(&self) { + async fn remove_inactive_peers(&self) { let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) .unwrap_or_default(); - self.in_memory_torrent_repository.remove_inactive_peers(current_cutoff); + self.in_memory_torrent_repository.remove_inactive_peers(current_cutoff).await; } - fn remove_peerless_torrents(&self) { + async fn remove_peerless_torrents(&self) { if self.config.tracker_policy.remove_peerless_torrents { self.in_memory_torrent_repository - .remove_peerless_torrents(&self.config.tracker_policy); + .remove_peerless_torrents(&self.config.tracker_policy) + .await; } } - fn log_aggregate_swarm_metadata(&self) { + async fn log_aggregate_swarm_metadata(&self) { // Pre-calculated data - let aggregate_swarm_metadata = self.in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = self.in_memory_torrent_repository.get_aggregate_swarm_metadata().await; tracing::info!(name: "pre_calculated_aggregate_swarm_metadata", torrents = aggregate_swarm_metadata.total_torrents, @@ -128,8 +129,8 @@ impl TorrentsManager { ); // Hot data (iterating over data structures) - let peerless_torrents = self.in_memory_torrent_repository.count_peerless_torrents(); - let peers = self.in_memory_torrent_repository.count_peers(); + let peerless_torrents = self.in_memory_torrent_repository.count_peerless_torrents().await; + let peers = self.in_memory_torrent_repository.count_peers().await; 
tracing::info!(name: "hot_aggregate_swarm_metadata", peerless_torrents = peerless_torrents, @@ -144,7 +145,7 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Core; - use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; + use torrust_tracker_torrent_repository::Swarms; use super::{DatabasePersistentTorrentRepository, TorrentsManager}; use crate::databases::setup::initialize_database; @@ -184,8 +185,8 @@ mod tests { ) } - #[test] - fn it_should_load_the_numbers_of_downloads_for_all_torrents_from_the_database() { + #[tokio::test] + async fn it_should_load_the_numbers_of_downloads_for_all_torrents_from_the_database() { let (torrents_manager, services) = initialize_torrents_manager(); let infohash = sample_info_hash(); @@ -199,7 +200,8 @@ mod tests { .in_memory_torrent_repository .get(&infohash) .unwrap() - .lock_or_panic() + .lock() + .await .metadata() .downloaded, 1 @@ -242,7 +244,7 @@ mod tests { )) .unwrap(); - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; assert!(services.in_memory_torrent_repository.get(&infohash).is_none()); } @@ -254,7 +256,9 @@ mod tests { let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(infohash, &peer, None).await; // Remove the peer. The torrent is now peerless. 
- in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + in_memory_torrent_repository + .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .await; } #[tokio::test] @@ -268,7 +272,7 @@ mod tests { add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository).await; - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; assert!(services.in_memory_torrent_repository.get(&infohash).is_none()); } @@ -284,7 +288,7 @@ mod tests { add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository).await; - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; assert!(services.in_memory_torrent_repository.get(&infohash).is_some()); } diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 37d9d3f5c..311480306 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -93,9 +93,10 @@ impl InMemoryTorrentRepository { /// # Panics /// /// This function panics if the underling swarms return an error. - pub(crate) fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + pub(crate) async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { self.swarms .remove_inactive_peers(current_cutoff) + .await .expect("Failed to remove inactive peers from swarms"); } @@ -112,9 +113,10 @@ impl InMemoryTorrentRepository { /// # Panics /// /// This function panics if the underling swarms return an error. 
- pub(crate) fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + pub(crate) async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { self.swarms .remove_peerless_torrents(policy) + .await .expect("Failed to remove peerless torrents from swarms"); } @@ -168,9 +170,10 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error.s #[must_use] - pub(crate) fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata { + pub(crate) async fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata { self.swarms .get_swarm_metadata_or_default(info_hash) + .await .expect("Failed to get swarm metadata") } @@ -196,9 +199,10 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. #[must_use] - pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { + pub(crate) async fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { self.swarms .get_peers_peers_excluding(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) + .await .expect("Failed to get other peers in swarm") } @@ -220,10 +224,11 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. #[must_use] - pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { // todo: pass the limit as an argument like `get_peers_for` self.swarms .get_swarm_peers(info_hash, TORRENT_PEERS_LIMIT) + .await .expect("Failed to get other peers in swarm") } @@ -241,9 +246,10 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. 
#[must_use] - pub fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { + pub async fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { self.swarms .get_aggregate_swarm_metadata() + .await .expect("Failed to get aggregate swarm metadata") } @@ -253,9 +259,10 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. #[must_use] - pub fn count_peerless_torrents(&self) -> usize { + pub async fn count_peerless_torrents(&self) -> usize { self.swarms .count_peerless_torrents() + .await .expect("Failed to count peerless torrents") } @@ -265,8 +272,8 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. #[must_use] - pub fn count_peers(&self) -> usize { - self.swarms.count_peers().expect("Failed to count peers") + pub async fn count_peers(&self) -> usize { + self.swarms.count_peers().await.expect("Failed to count peers") } /// Imports persistent torrent data into the in-memory repository. diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 14a4f58f5..97694a80f 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -17,7 +17,6 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::peer; -use torrust_tracker_torrent_repository::LockTrackedTorrent; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -94,14 +93,17 @@ pub struct BasicInfo { /// /// This function panics if the lock for the torrent entry cannot be obtained. 
#[must_use] -pub fn get_torrent_info(in_memory_torrent_repository: &Arc, info_hash: &InfoHash) -> Option { +pub async fn get_torrent_info( + in_memory_torrent_repository: &Arc, + info_hash: &InfoHash, +) -> Option { let torrent_entry_option = in_memory_torrent_repository.get(info_hash); let torrent_entry = torrent_entry_option?; - let stats = torrent_entry.lock_or_panic().metadata(); + let stats = torrent_entry.lock().await.metadata(); - let peers = torrent_entry.lock_or_panic().peers(None); + let peers = torrent_entry.lock().await.peers(None); let peers = Some(peers.iter().map(|peer| (**peer)).collect()); @@ -136,14 +138,14 @@ pub fn get_torrent_info(in_memory_torrent_repository: &Arc, pagination: Option<&Pagination>, ) -> Vec { let mut basic_infos: Vec = vec![]; for (info_hash, torrent_entry) in in_memory_torrent_repository.get_paginated(pagination) { - let stats = torrent_entry.lock_or_panic().metadata(); + let stats = torrent_entry.lock().await.metadata(); basic_infos.push(BasicInfo { info_hash, @@ -178,19 +180,21 @@ pub fn get_torrents_page( /// /// This function panics if the lock for the torrent entry cannot be obtained. 
#[must_use] -pub fn get_torrents(in_memory_torrent_repository: &Arc, info_hashes: &[InfoHash]) -> Vec { +pub async fn get_torrents( + in_memory_torrent_repository: &Arc, + info_hashes: &[InfoHash], +) -> Vec { let mut basic_infos: Vec = vec![]; for info_hash in info_hashes { - if let Some(stats) = in_memory_torrent_repository - .get(info_hash) - .map(|torrent_entry| torrent_entry.lock_or_panic().metadata()) - { + if let Some(torrent_entry) = in_memory_torrent_repository.get(info_hash) { + let metadata = torrent_entry.lock().await.metadata(); + basic_infos.push(BasicInfo { info_hash: *info_hash, - seeders: u64::from(stats.complete), - completed: u64::from(stats.downloaded), - leechers: u64::from(stats.incomplete), + seeders: u64::from(metadata.complete), + completed: u64::from(metadata.downloaded), + leechers: u64::from(metadata.incomplete), }); } } @@ -235,7 +239,8 @@ mod tests { let torrent_info = get_torrent_info( &in_memory_torrent_repository, &InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(), // DevSkim: ignore DS173237 - ); + ) + .await; assert!(torrent_info.is_none()); } @@ -250,7 +255,7 @@ mod tests { .upsert_peer(&info_hash, &sample_peer(), None) .await; - let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).unwrap(); + let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).await.unwrap(); assert_eq!( torrent_info, @@ -280,7 +285,7 @@ mod tests { async fn it_should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; assert_eq!(torrents, vec![]); } @@ -296,7 +301,7 @@ mod tests { .upsert_peer(&info_hash, &sample_peer(), None) .await; - let torrents = 
get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; assert_eq!( torrents, @@ -329,7 +334,7 @@ mod tests { let offset = 0; let limit = 1; - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); } @@ -354,7 +359,7 @@ mod tests { let offset = 1; let limit = 4000; - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); assert_eq!( @@ -384,7 +389,7 @@ mod tests { .upsert_peer(&info_hash2, &sample_peer(), None) .await; - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; assert_eq!( torrents, @@ -419,7 +424,7 @@ mod tests { async fn it_should_return_an_empty_list_if_none_of_the_requested_torrents_is_found() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let torrent_info = get_torrents(&in_memory_torrent_repository, &[sample_info_hash()]); + let torrent_info = get_torrents(&in_memory_torrent_repository, &[sample_info_hash()]).await; assert!(torrent_info.is_empty()); } @@ -434,7 +439,7 @@ mod tests { .upsert_peer(&info_hash, &sample_peer(), None) .await; - let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]); + let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]).await; assert_eq!( torrent_info, diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index 
c76f02040..20ba2ea7f 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -63,7 +63,7 @@ pub async fn get_metrics( in_memory_torrent_repository: Arc, stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let stats = stats_repository.get_stats().await; TrackerMetrics { diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index ba0721289..86e7888f2 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -254,7 +254,8 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) @@ -348,7 +349,8 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } @@ -505,7 +507,8 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; let external_ip_in_tracker_configuration = core_tracker_services.core_config.net.external_ip.unwrap(); @@ -587,7 +590,8 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) @@ -684,7 +688,8 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - 
.get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; // When using IPv6 the tracker converts the remote client ip into a IPv4 address assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); @@ -940,7 +945,7 @@ mod tests { .await .unwrap(); - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()).await; let external_ip_in_tracker_configuration = core_config.net.external_ip.unwrap(); diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index a2215067b..c8b24a744 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -66,7 +66,7 @@ pub async fn get_metrics( ban_service: Arc>, stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let stats = stats_repository.get_stats().await; let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 0107b5370..8a3a71a44 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -45,7 +45,7 @@ pub fn start_job(config: &Core, torrents_manager: &Arc) -> Join if let Some(torrents_manager) = weak_torrents_manager.upgrade() { let start_time = Utc::now().time(); tracing::info!("Cleaning up torrents (executed every {} secs) ...", interval_in_secs); - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; tracing::info!("Cleaned up torrents in: {} ms", (Utc::now().time() - start_time).num_milliseconds()); } else { break; From 1eb545c0fb233bba0206c2557401c2b4c686cc3a Mon Sep 
17 00:00:00 2001 From: Jose Celano Date: Tue, 13 May 2025 09:16:15 +0100 Subject: [PATCH 044/247] feat: [#1358] remove persistent metric from torrent-repository pkg This package does not have persistence. Persistence is only handled in the `tracker-core` package. The metric will be included there. --- packages/torrent-repository/src/statistics/mod.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index b0dce479f..fc8f1e1e8 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -8,7 +8,6 @@ use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; const TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_runtime_torrents_downloads_total"; -const TORRENT_REPOSITORY_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_persistent_torrents_downloads_total"; #[must_use] pub fn describe_metrics() -> Metrics { @@ -22,13 +21,5 @@ pub fn describe_metrics() -> Metrics { )), ); - metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), - Some(Unit::Count), - Some(&MetricDescription::new( - "The total number of torrent downloads since persistent statistics were enabled the first time.", - )), - ); - metrics } From 29a2dfd80dd76176ed517534ae5f0bf75a59c50a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 13 May 2025 13:47:34 +0100 Subject: [PATCH 045/247] dev: change default config Decrease torrent cleanup interval and peer timeout to do manual tests faster.
--- share/default/config/tracker.development.sqlite3.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 488743eb9..89d700132 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -7,14 +7,14 @@ schema_version = "2.0.0" threshold = "info" [core] -#inactive_peer_cleanup_interval = 60 +inactive_peer_cleanup_interval = 60 listed = false private = false [core.tracker_policy] -#max_peer_timeout = 30 +max_peer_timeout = 30 persistent_torrent_completed_stat = true -#remove_peerless_torrents = true +remove_peerless_torrents = true [[udp_trackers]] bind_address = "0.0.0.0:6868" From d47483ff065b65f3ab51e27a481bf82c5048e3c6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 13 May 2025 13:52:47 +0100 Subject: [PATCH 046/247] feat: [#1358] new metric in torrent-repository: total number of torrents --- .../src/statistics/event/handler.rs | 4 -- packages/metrics/src/metric_collection.rs | 14 +++++- .../src/statistics/event/handler.rs | 29 ++++++++---- .../src/statistics/metrics.rs | 26 ++++++++++- .../torrent-repository/src/statistics/mod.rs | 7 +++ .../src/statistics/repository.rs | 44 +++++++++++++++++-- packages/torrent-repository/src/swarms.rs | 6 +++ 7 files changed, 111 insertions(+), 19 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index 8d2ad1aa2..f5506f6e3 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -9,10 +9,6 @@ use crate::event::Event; use crate::statistics::repository::Repository; use crate::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; -/// # Panics -/// -/// This function panics if the client IP address is not the same as the IP -/// version of 
the event. pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { match event { Event::TcpAnnounce { connection, .. } => { diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 438f3b03a..83b08f178 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -140,7 +140,12 @@ impl MetricCollection { /// /// Return an error if a metrics of a different type with the same name /// already exists. - pub fn increase_gauge(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) -> Result<(), Error> { + pub fn increment_gauge( + &mut self, + name: &MetricName, + label_set: &LabelSet, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { if self.counters.metrics.contains_key(name) { return Err(Error::MetricNameCollisionAdding { metric_name: name.clone(), @@ -156,7 +161,12 @@ impl MetricCollection { /// /// Return an error if a metrics of a different type with the same name /// already exists. 
- pub fn decrease_gauge(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) -> Result<(), Error> { + pub fn decrement_gauge( + &mut self, + name: &MetricName, + label_set: &LabelSet, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { if self.counters.metrics.contains_key(name) { return Err(Error::MetricNameCollisionAdding { metric_name: name.clone(), diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index 2073575a8..6428bbeb7 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -1,23 +1,36 @@ use std::sync::Arc; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; +use crate::statistics::TORRENT_REPOSITORY_TORRENTS_TOTAL; -/// # Panics -/// -/// This function panics if the client IP address is not the same as the IP -/// version of the event. -pub async fn handle_event(event: Event, stats_repository: &Arc, _now: DurationSinceUnixEpoch) { +pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { match event { Event::TorrentAdded { info_hash, .. 
} => { - // todo: update metrics tracing::debug!("Torrent added {info_hash}"); + + match stats_repository + .increment_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increment the gauge: {}", err), + }; } Event::TorrentRemoved { info_hash } => { - // todo: update metrics tracing::debug!("Torrent removed {info_hash}"); + + match stats_repository + .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), + }; } Event::PeerAdded { announcement } => { // todo: update metrics @@ -28,6 +41,4 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, _now tracing::debug!("Peer removed: socket address {socket_addr:?}, peer ID: {peer_id:?}"); } } - - tracing::debug!("metrics: {:?}", stats_repository.get_metrics().await); } diff --git a/packages/torrent-repository/src/statistics/metrics.rs b/packages/torrent-repository/src/statistics/metrics.rs index 6ee275e63..f8ab3f9d9 100644 --- a/packages/torrent-repository/src/statistics/metrics.rs +++ b/packages/torrent-repository/src/statistics/metrics.rs @@ -15,7 +15,7 @@ impl Metrics { /// # Errors /// /// Returns an error if the metric does not exist and it cannot be created. - pub fn increase_counter( + pub fn increment_counter( &mut self, metric_name: &MetricName, labels: &LabelSet, @@ -36,4 +36,28 @@ impl Metrics { ) -> Result<(), Error> { self.metric_collection.set_gauge(metric_name, labels, value, now) } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. 
+ pub fn increment_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increment_gauge(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn decrement_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.decrement_gauge(metric_name, labels, now) + } } diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index fc8f1e1e8..f1507b7bb 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -7,12 +7,19 @@ use torrust_tracker_metrics::metric::description::MetricDescription; use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; +const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; const TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_runtime_torrents_downloads_total"; #[must_use] pub fn describe_metrics() -> Metrics { let mut metrics = Metrics::default(); + metrics.metric_collection.describe_gauge( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of torrents.")), + ); + metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL), Some(Unit::Count), diff --git a/packages/torrent-repository/src/statistics/repository.rs b/packages/torrent-repository/src/statistics/repository.rs index 9fdff7008..a8cb8549e 100644 --- a/packages/torrent-repository/src/statistics/repository.rs +++ b/packages/torrent-repository/src/statistics/repository.rs @@ -36,8 +36,8 @@ impl Repository { /// # Errors /// /// This function will return an error if the metric collection fails to 
- /// increase the counter. - pub async fn increase_counter( + /// increment the counter. + pub async fn increment_counter( &self, metric_name: &MetricName, labels: &LabelSet, @@ -45,7 +45,45 @@ impl Repository { ) -> Result<(), Error> { let mut stats_lock = self.stats.write().await; - let result = stats_lock.increase_counter(metric_name, labels, now); + let result = stats_lock.increment_counter(metric_name, labels, now); + + drop(stats_lock); + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increment the gauge. + pub async fn increment_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.increment_gauge(metric_name, labels, now); + + drop(stats_lock); + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// decrement the gauge. 
+ pub async fn decrement_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.decrement_gauge(metric_name, labels, now); drop(stats_lock); diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 277a85cc2..41123fd50 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -299,9 +299,15 @@ impl Swarms { continue; } + let info_hash = *swarm_handle.key(); + swarm_handle.remove(); peerless_torrents_removed += 1; + + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender.send(Event::TorrentRemoved { info_hash }).await; + } } tracing::info!(peerless_torrents_removed = peerless_torrents_removed); From ba2033bf60d3b9e56fe8063d57db648fa39858ce Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 13 May 2025 15:08:40 +0100 Subject: [PATCH 047/247] fix: [#1358] trigger PeerRemoved event when peer is removed due to inactivity --- packages/torrent-repository/src/event.rs | 2 +- .../src/statistics/event/handler.rs | 5 ++- packages/torrent-repository/src/swarm.rs | 38 ++++++++++++++----- packages/torrent-repository/src/swarms.rs | 4 +- .../torrent-repository/tests/swarm/mod.rs | 2 +- 5 files changed, 36 insertions(+), 15 deletions(-) diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs index 57fe7bc4b..1184714ae 100644 --- a/packages/torrent-repository/src/event.rs +++ b/packages/torrent-repository/src/event.rs @@ -17,7 +17,7 @@ pub enum Event { announcement: PeerAnnouncement, }, PeerRemoved { - socket_addr: SocketAddr, + peer_addr: SocketAddr, peer_id: PeerId, }, } diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index 6428bbeb7..8022102d9 100644 --- 
a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -36,7 +36,10 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: // todo: update metrics tracing::debug!("Peer added {announcement:?}"); } - Event::PeerRemoved { socket_addr, peer_id } => { + Event::PeerRemoved { + peer_addr: socket_addr, + peer_id, + } => { // todo: update metrics tracing::debug!("Peer removed: socket address {socket_addr:?}, peer ID: {peer_id:?}"); } diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index d1918bd24..32785cada 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -143,7 +143,7 @@ impl Swarm { if let Some(event_sender) = self.event_sender.as_deref() { event_sender .send(Event::PeerRemoved { - socket_addr: old_peer.peer_addr, + peer_addr: old_peer.peer_addr, peer_id: old_peer.peer_id, }) .await; @@ -155,10 +155,11 @@ impl Swarm { } } - pub fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) -> u64 { - let mut inactive_peers_removed = 0; + pub async fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) -> usize { + let mut number_of_peers_removed = 0; + let mut removed_peers = Vec::new(); - self.peers.retain(|_, peer| { + self.peers.retain(|_key, peer| { let is_active = peer::ReadInfo::get_updated(peer) > current_cutoff; if !is_active { @@ -169,13 +170,30 @@ impl Swarm { self.metadata.incomplete -= 1; } - inactive_peers_removed += 1; + number_of_peers_removed += 1; + + if let Some(_event_sender) = self.event_sender.as_deref() { + // Events can not be trigger here because retain does not allow + // async closures. 
+ removed_peers.push((peer.peer_addr, peer.peer_id)); + } } is_active }); - inactive_peers_removed + if let Some(event_sender) = self.event_sender.as_deref() { + for (peer_addr, peer_id) in &removed_peers { + event_sender + .send(Event::PeerRemoved { + peer_addr: *peer_addr, + peer_id: *peer_id, + }) + .await; + } + } + + number_of_peers_removed } #[must_use] @@ -431,7 +449,7 @@ mod tests { swarm.upsert_peer(peer.into(), &mut downloads_increased).await; // Remove peers not updated since one second after inserting the peer - swarm.remove_inactive(last_update_time + one_second); + swarm.remove_inactive(last_update_time + one_second).await; assert_eq!(swarm.len(), 0); } @@ -448,7 +466,7 @@ mod tests { swarm.upsert_peer(peer.into(), &mut downloads_increased).await; // Remove peers not updated since one second before inserting the peer. - swarm.remove_inactive(last_update_time - one_second); + swarm.remove_inactive(last_update_time - one_second).await; assert_eq!(swarm.len(), 1); } @@ -753,7 +771,7 @@ mod tests { let leechers = swarm.metadata().leechers(); - swarm.remove_inactive(leecher.updated + Duration::from_secs(1)); + swarm.remove_inactive(leecher.updated + Duration::from_secs(1)).await; assert_eq!(swarm.metadata().leechers(), leechers - 1); } @@ -769,7 +787,7 @@ mod tests { let seeders = swarm.metadata().seeders(); - swarm.remove_inactive(seeder.updated + Duration::from_secs(1)); + swarm.remove_inactive(seeder.updated + Duration::from_secs(1)).await; assert_eq!(swarm.metadata().seeders(), seeders - 1); } diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 41123fd50..c74fec3ea 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -259,7 +259,7 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. 
- pub async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result { tracing::info!( "Removing inactive peers since: {:?} ...", convert_from_timestamp_to_datetime_utc(current_cutoff) ); @@ -269,7 +269,7 @@ impl Swarms { for swarm_handle in &self.swarms { let mut swarm = swarm_handle.value().lock().await; - let removed = swarm.remove_inactive(current_cutoff); + let removed = swarm.remove_inactive(current_cutoff).await; inactive_peers_removed += removed; } diff --git a/packages/torrent-repository/tests/swarm/mod.rs b/packages/torrent-repository/tests/swarm/mod.rs index 1f5d0b737..f7ae4b439 100644 --- a/packages/torrent-repository/tests/swarm/mod.rs +++ b/packages/torrent-repository/tests/swarm/mod.rs @@ -390,7 +390,7 @@ async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(swarm())] mut sw assert_eq!(swarm.len(), peers.len() + 1); let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); - swarm.remove_inactive(current_cutoff); + swarm.remove_inactive(current_cutoff).await; assert_eq!(swarm.len(), peers.len()); } From 269d27398975df921a846770e58b4d0a5bfde256 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 13 May 2025 20:14:31 +0100 Subject: [PATCH 048/247] refactor: [#1358] rename metric From `TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL` to `TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL`. None of the metrics in the `torrent-repository` package will be persisted. We can use the `persistent` suffix for metrics in other packages to avoid conflicts. It's planned to use the same metric in the `tracker-core` package but with the historical persisted value.
--- packages/torrent-repository/src/statistics/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index f1507b7bb..941d619e9 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -8,7 +8,7 @@ use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; -const TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_runtime_torrents_downloads_total"; +const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; #[must_use] pub fn describe_metrics() -> Metrics { @@ -21,7 +21,7 @@ pub fn describe_metrics() -> Metrics { ); metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL), + &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), Some(Unit::Count), Some(&MetricDescription::new( "The total number of torrent downloads since the tracker process started.", From 01a9970256c1c4f76ee9efb1bfb5faa886c7fd3d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 07:40:22 +0100 Subject: [PATCH 049/247] feat: [#1358] new metric in torrent-repository: total number of peers You can tested it manually with: ``` cargo run -p torrust-tracker-client --bin udp_tracker_client announce udp://127.0.0.1:6969 443c7602b4fde83d1154d6d9da48808418b181b6 | jq curl -s "http://localhost:1212/api/v1/metrics?token=MyAccessToken&format=prometheus" | grep torrent_repository_peers_total Finished `dev` profile [optimized + debuginfo] target(s) in 0.10s Running `target/debug/udp_tracker_client announce 'udp://127.0.0.1:6969' 443c7602b4fde83d1154d6d9da48808418b181b6` { "AnnounceIpv4": { "transaction_id": -888840697, "announce_interval": 120, "leechers": 0, "seeders": 
1, "peers": [] } } torrent_repository_peers_total{peer_role="seeder"} 1 ``` --- packages/torrent-repository/src/event.rs | 10 ++-- .../src/statistics/event/handler.rs | 51 ++++++++++++++----- .../torrent-repository/src/statistics/mod.rs | 16 ++++++ packages/torrent-repository/src/swarm.rs | 24 ++------- 4 files changed, 61 insertions(+), 40 deletions(-) diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs index 1184714ae..fecb8cd1d 100644 --- a/packages/torrent-repository/src/event.rs +++ b/packages/torrent-repository/src/event.rs @@ -1,8 +1,5 @@ -use std::net::SocketAddr; - -use aquatic_udp_protocol::PeerId; use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::peer::PeerAnnouncement; +use torrust_tracker_primitives::peer::{Peer, PeerAnnouncement}; #[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { @@ -14,11 +11,10 @@ pub enum Event { info_hash: InfoHash, }, PeerAdded { - announcement: PeerAnnouncement, + peer: Peer, }, PeerRemoved { - peer_addr: SocketAddr, - peer_id: PeerId, + peer: Peer, }, } diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index 8022102d9..e869e7c1a 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -1,17 +1,17 @@ use std::sync::Arc; -use torrust_tracker_metrics::label::LabelSet; -use torrust_tracker_metrics::metric_name; +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::{label_name, metric_name}; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; -use crate::statistics::TORRENT_REPOSITORY_TORRENTS_TOTAL; +use crate::statistics::{TORRENT_REPOSITORY_PEERS_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL}; pub async fn handle_event(event: Event, stats_repository: &Arc, now: 
DurationSinceUnixEpoch) { match event { Event::TorrentAdded { info_hash, .. } => { - tracing::debug!("Torrent added {info_hash}"); + tracing::debug!(info_hash = ?info_hash, "Torrent added",); match stats_repository .increment_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) @@ -22,7 +22,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: }; } Event::TorrentRemoved { info_hash } => { - tracing::debug!("Torrent removed {info_hash}"); + tracing::debug!(info_hash = ?info_hash, "Torrent removed",); match stats_repository .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) @@ -32,16 +32,39 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), }; } - Event::PeerAdded { announcement } => { - // todo: update metrics - tracing::debug!("Peer added {announcement:?}"); + Event::PeerAdded { peer } => { + tracing::debug!(peer = ?peer, "Peer added", ); + + let label_set: LabelSet = if peer.is_seeder() { + (label_name!("peer_role"), LabelValue::new("seeder")).into() + } else { + (label_name!("peer_role"), LabelValue::new("leecher")).into() + }; + + match stats_repository + .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increment the gauge: {}", err), + }; } - Event::PeerRemoved { - peer_addr: socket_addr, - peer_id, - } => { - // todo: update metrics - tracing::debug!("Peer removed: socket address {socket_addr:?}, peer ID: {peer_id:?}"); + Event::PeerRemoved { peer } => { + tracing::debug!(peer = ?peer, "Peer removed", ); + + let label_set: LabelSet = if peer.is_seeder() { + (label_name!("peer_role"), LabelValue::new("seeder")).into() + } else { + (label_name!("peer_role"), LabelValue::new("leecher")).into() + }; + + match stats_repository + 
.decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), + }; } } } diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index 941d619e9..4deaf19cb 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -7,13 +7,21 @@ use torrust_tracker_metrics::metric::description::MetricDescription; use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; +// Torrent metrics + const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; +// Peers metrics + +const TORRENT_REPOSITORY_PEERS_TOTAL: &str = "torrent_repository_peers_total"; + #[must_use] pub fn describe_metrics() -> Metrics { let mut metrics = Metrics::default(); + // Torrent metrics + metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), Some(Unit::Count), @@ -28,5 +36,13 @@ pub fn describe_metrics() -> Metrics { )), ); + // Peers metrics + + metrics.metric_collection.describe_gauge( + &metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of peers.")), + ); + metrics } diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 32785cada..9832d8b2a 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -117,11 +117,7 @@ impl Swarm { } if let Some(event_sender) = self.event_sender.as_deref() { - event_sender - .send(Event::PeerAdded { - announcement: *announcement, - }) - .await; + event_sender.send(Event::PeerAdded { peer: *announcement }).await; } None @@ -141,12 +137,7 @@ impl Swarm { } if let Some(event_sender) = 
self.event_sender.as_deref() { - event_sender - .send(Event::PeerRemoved { - peer_addr: old_peer.peer_addr, - peer_id: old_peer.peer_id, - }) - .await; + event_sender.send(Event::PeerRemoved { peer: *old_peer.clone() }).await; } Some(old_peer) @@ -175,7 +166,7 @@ impl Swarm { if let Some(_event_sender) = self.event_sender.as_deref() { // Events can not be trigger here because retain does not allow // async closures. - removed_peers.push((peer.peer_addr, peer.peer_id)); + removed_peers.push(*peer.clone()); } } @@ -183,13 +174,8 @@ impl Swarm { }); if let Some(event_sender) = self.event_sender.as_deref() { - for (peer_addr, peer_id) in &removed_peers { - event_sender - .send(Event::PeerRemoved { - peer_addr: *peer_addr, - peer_id: *peer_id, - }) - .await; + for peer in &removed_peers { + event_sender.send(Event::PeerRemoved { peer: *peer }).await; } } From daba8a07ae957c927a6d45591549a10b12a9a582 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 12:13:38 +0100 Subject: [PATCH 050/247] feat: [#1358] new metric in torrent-repository: total number of downloads --- packages/primitives/src/peer.rs | 42 +++++++++++ packages/torrent-repository/Cargo.toml | 2 +- packages/torrent-repository/src/event.rs | 7 ++ .../src/statistics/event/handler.rs | 72 +++++++++++++++---- packages/torrent-repository/src/swarm.rs | 15 +++- 5 files changed, 122 insertions(+), 16 deletions(-) diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index bd753b220..316541ad6 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -27,6 +27,7 @@ use std::ops::{Deref, DerefMut}; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use derive_more::Display; use serde::Serialize; use zerocopy::FromBytes as _; @@ -34,6 +35,24 @@ use crate::DurationSinceUnixEpoch; pub type PeerAnnouncement = Peer; +#[derive(Debug, Display, Serialize, Copy, Clone, PartialEq, Eq, Hash)] +#[serde(rename_all_fields = 
"lowercase")] +pub enum PeerRole { + Seeder, + Leecher, +} + +impl PeerRole { + /// Returns the opposite role: Seeder becomes Leecher, and vice versa. + #[must_use] + pub fn opposite(self) -> Self { + match self { + PeerRole::Seeder => PeerRole::Leecher, + PeerRole::Leecher => PeerRole::Seeder, + } + } +} + /// Peer struct used by the core `Tracker`. /// /// A sample peer: @@ -147,6 +166,7 @@ impl PartialOrd for Peer { pub trait ReadInfo { fn is_seeder(&self) -> bool; + fn is_leecher(&self) -> bool; fn get_event(&self) -> AnnounceEvent; fn get_id(&self) -> PeerId; fn get_updated(&self) -> DurationSinceUnixEpoch; @@ -158,6 +178,10 @@ impl ReadInfo for Peer { self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } + fn is_leecher(&self) -> bool { + !self.is_seeder() + } + fn get_event(&self) -> AnnounceEvent { self.event } @@ -180,6 +204,10 @@ impl ReadInfo for Arc { self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } + fn is_leecher(&self) -> bool { + !self.is_seeder() + } + fn get_event(&self) -> AnnounceEvent { self.event } @@ -203,6 +231,20 @@ impl Peer { self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } + #[must_use] + pub fn is_leecher(&self) -> bool { + !self.is_seeder() + } + + #[must_use] + pub fn role(&self) -> PeerRole { + if self.is_seeder() { + PeerRole::Seeder + } else { + PeerRole::Leecher + } + } + pub fn ip(&mut self) -> IpAddr { self.peer_addr.ip() } diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 1c7cc09fe..26662b583 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -20,7 +20,7 @@ aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" crossbeam-skiplist = "0" futures = "0" -serde = "1.0.219" +serde = { version = "1.0.219", features = ["derive"] } thiserror = "2.0.12" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = 
"3.0.0-develop", path = "../clock" } diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs index fecb8cd1d..69d35141f 100644 --- a/packages/torrent-repository/src/event.rs +++ b/packages/torrent-repository/src/event.rs @@ -16,6 +16,13 @@ pub enum Event { PeerRemoved { peer: Peer, }, + PeerUpdated { + old_peer: Peer, + new_peer: Peer, + }, + PeerDownloadCompleted { + peer: Peer, + }, } pub mod sender { diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index e869e7c1a..5bf4a2f84 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -2,11 +2,14 @@ use std::sync::Arc; use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; -use crate::statistics::{TORRENT_REPOSITORY_PEERS_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL}; +use crate::statistics::{ + TORRENT_REPOSITORY_PEERS_TOTAL, TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, +}; pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { match event { @@ -35,14 +38,8 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: Event::PeerAdded { peer } => { tracing::debug!(peer = ?peer, "Peer added", ); - let label_set: LabelSet = if peer.is_seeder() { - (label_name!("peer_role"), LabelValue::new("seeder")).into() - } else { - (label_name!("peer_role"), LabelValue::new("leecher")).into() - }; - match stats_repository - .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set, now) + .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) .await { 
Ok(()) => {} @@ -52,19 +49,66 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: Event::PeerRemoved { peer } => { tracing::debug!(peer = ?peer, "Peer removed", ); - let label_set: LabelSet = if peer.is_seeder() { - (label_name!("peer_role"), LabelValue::new("seeder")).into() - } else { - (label_name!("peer_role"), LabelValue::new("leecher")).into() + match stats_repository + .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), }; + } + Event::PeerUpdated { old_peer, new_peer } => { + tracing::debug!(old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated", ); + + if old_peer.role() != new_peer.role() { + match stats_repository + .increment_gauge( + &metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), + &(label_name!("peer_role"), LabelValue::new(&new_peer.role().to_string())).into(), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increment the gauge: {}", err), + } + + match stats_repository + .decrement_gauge( + &metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), + &(label_name!("peer_role"), LabelValue::new(&old_peer.role().to_string())).into(), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), + }; + } + } + Event::PeerDownloadCompleted { peer } => { + tracing::debug!(peer = ?peer, "Peer download completed", ); match stats_repository - .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set, now) + .increment_counter( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), + &label_set_for_peer(&peer), + now, + ) .await { Ok(()) => {} - Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), + Err(err) => tracing::error!("Failed to increment the gauge: {}", err), }; } } } + +/// Returns the label set to be included in the metrics for the given peer. 
+fn label_set_for_peer(peer: &Peer) -> LabelSet { + if peer.is_seeder() { + (label_name!("peer_role"), LabelValue::new("seeder")).into() + } else { + (label_name!("peer_role"), LabelValue::new("leecher")).into() + } +} diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 9832d8b2a..782726958 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -82,7 +82,7 @@ impl Swarm { if let Some(old_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { // A peer has been updated in the swarm. - // Check if the peer has changed its from leecher to seeder or vice versa. + // Check if the peer has changed from leecher to seeder or vice versa. if old_announce.is_seeder() != is_now_seeder { if is_now_seeder { self.metadata.complete += 1; @@ -99,6 +99,19 @@ impl Swarm { *downloads_increased = true; } + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerUpdated { + old_peer: *old_announce, + new_peer: *announcement, + }) + .await; + + if *downloads_increased { + event_sender.send(Event::PeerDownloadCompleted { peer: *announcement }).await; + } + } + Some(old_announce) } else { // A new peer has been added to the swarm. 
From c706a1b30915f660ec09a3c28bc4a4a841536a5c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 12:24:14 +0100 Subject: [PATCH 051/247] refactor: [#1358] move logs --- .../src/statistics/event/handler.rs | 60 +++++-------------- .../src/statistics/repository.rs | 15 +++++ 2 files changed, 31 insertions(+), 44 deletions(-) diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index 5bf4a2f84..90df19ab6 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -16,90 +16,62 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: Event::TorrentAdded { info_hash, .. } => { tracing::debug!(info_hash = ?info_hash, "Torrent added",); - match stats_repository + let _unused = stats_repository .increment_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increment the gauge: {}", err), - }; + .await; } Event::TorrentRemoved { info_hash } => { tracing::debug!(info_hash = ?info_hash, "Torrent removed",); - match stats_repository + let _unused = stats_repository .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), - }; + .await; } Event::PeerAdded { peer } => { tracing::debug!(peer = ?peer, "Peer added", ); - match stats_repository + let _unused = stats_repository .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increment the gauge: {}", err), - }; + .await; } Event::PeerRemoved { peer } => { tracing::debug!(peer = ?peer, "Peer removed", ); - match stats_repository + let _unused = stats_repository 
.decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), - }; + .await; } Event::PeerUpdated { old_peer, new_peer } => { tracing::debug!(old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated", ); if old_peer.role() != new_peer.role() { - match stats_repository + let _unused = stats_repository .increment_gauge( &metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), - &(label_name!("peer_role"), LabelValue::new(&new_peer.role().to_string())).into(), + &label_set_for_peer(&new_peer), now, ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increment the gauge: {}", err), - } + .await; - match stats_repository + let _unused = stats_repository .decrement_gauge( &metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), - &(label_name!("peer_role"), LabelValue::new(&old_peer.role().to_string())).into(), + &label_set_for_peer(&old_peer), now, ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), - }; + .await; } } Event::PeerDownloadCompleted { peer } => { tracing::debug!(peer = ?peer, "Peer download completed", ); - match stats_repository + let _unused = stats_repository .increment_counter( &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), &label_set_for_peer(&peer), now, ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increment the gauge: {}", err), - }; + .await; } } } diff --git a/packages/torrent-repository/src/statistics/repository.rs b/packages/torrent-repository/src/statistics/repository.rs index a8cb8549e..1e376faf7 100644 --- a/packages/torrent-repository/src/statistics/repository.rs +++ b/packages/torrent-repository/src/statistics/repository.rs @@ -49,6 +49,11 @@ impl Repository { drop(stats_lock); + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to increment the counter: {}", err), + } + result } @@ -68,6 +73,11 
@@ impl Repository { drop(stats_lock); + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to increment the gauge: {}", err), + } + result } @@ -87,6 +97,11 @@ impl Repository { drop(stats_lock); + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to decrement the gauge: {}", err), + } + result } } From 60c00e8bd575285f5c47e0cf8518574b527b6db7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 12:46:53 +0100 Subject: [PATCH 052/247] feat: [#1358] add info-hash to all torrent-repository events To know which swarm the event belongs to. --- packages/torrent-repository/src/event.rs | 4 + .../src/statistics/event/handler.rs | 20 ++-- packages/torrent-repository/src/swarm.rs | 106 +++++++++++------- packages/torrent-repository/src/swarms.rs | 11 +- .../torrent-repository/tests/swarm/mod.rs | 3 +- .../torrent-repository/tests/swarms/mod.rs | 22 ++-- 6 files changed, 104 insertions(+), 62 deletions(-) diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs index 69d35141f..ac1c06637 100644 --- a/packages/torrent-repository/src/event.rs +++ b/packages/torrent-repository/src/event.rs @@ -11,16 +11,20 @@ pub enum Event { info_hash: InfoHash, }, PeerAdded { + info_hash: InfoHash, peer: Peer, }, PeerRemoved { + info_hash: InfoHash, peer: Peer, }, PeerUpdated { + info_hash: InfoHash, old_peer: Peer, new_peer: Peer, }, PeerDownloadCompleted { + info_hash: InfoHash, peer: Peer, }, } diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index 90df19ab6..d2783f9ba 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -27,22 +27,26 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) .await; } - 
Event::PeerAdded { peer } => { - tracing::debug!(peer = ?peer, "Peer added", ); + Event::PeerAdded { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer added", ); let _unused = stats_repository .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) .await; } - Event::PeerRemoved { peer } => { - tracing::debug!(peer = ?peer, "Peer removed", ); + Event::PeerRemoved { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer removed", ); let _unused = stats_repository .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) .await; } - Event::PeerUpdated { old_peer, new_peer } => { - tracing::debug!(old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated", ); + Event::PeerUpdated { + info_hash, + old_peer, + new_peer, + } => { + tracing::debug!(info_hash = ?info_hash, old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated", ); if old_peer.role() != new_peer.role() { let _unused = stats_repository @@ -62,8 +66,8 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: .await; } } - Event::PeerDownloadCompleted { peer } => { - tracing::debug!(peer = ?peer, "Peer download completed", ); + Event::PeerDownloadCompleted { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); let _unused = stats_repository .increment_counter( diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 782726958..3fe0e27d7 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -7,6 +7,7 @@ use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::AnnounceEvent; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::{self, Peer, PeerAnnouncement}; use 
torrust_tracker_primitives::swarm_metadata::SwarmMetadata; @@ -15,8 +16,9 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::sender::Sender; use crate::event::Event; -#[derive(Clone, Default)] +#[derive(Clone)] pub struct Swarm { + info_hash: InfoHash, peers: BTreeMap>, metadata: SwarmMetadata, event_sender: Sender, @@ -49,8 +51,9 @@ impl Eq for Swarm {} impl Swarm { #[must_use] - pub fn new(downloaded: u32, event_sender: Sender) -> Self { + pub fn new(info_hash: &InfoHash, downloaded: u32, event_sender: Sender) -> Self { Self { + info_hash: *info_hash, peers: BTreeMap::new(), metadata: SwarmMetadata::new(downloaded, 0, 0), event_sender, @@ -102,13 +105,19 @@ impl Swarm { if let Some(event_sender) = self.event_sender.as_deref() { event_sender .send(Event::PeerUpdated { + info_hash: self.info_hash, old_peer: *old_announce, new_peer: *announcement, }) .await; if *downloads_increased { - event_sender.send(Event::PeerDownloadCompleted { peer: *announcement }).await; + event_sender + .send(Event::PeerDownloadCompleted { + info_hash: self.info_hash, + peer: *announcement, + }) + .await; } } @@ -130,7 +139,12 @@ impl Swarm { } if let Some(event_sender) = self.event_sender.as_deref() { - event_sender.send(Event::PeerAdded { peer: *announcement }).await; + event_sender + .send(Event::PeerAdded { + info_hash: self.info_hash, + peer: *announcement, + }) + .await; } None @@ -150,7 +164,12 @@ impl Swarm { } if let Some(event_sender) = self.event_sender.as_deref() { - event_sender.send(Event::PeerRemoved { peer: *old_peer.clone() }).await; + event_sender + .send(Event::PeerRemoved { + info_hash: self.info_hash, + peer: *old_peer.clone(), + }) + .await; } Some(old_peer) @@ -188,7 +207,12 @@ impl Swarm { if let Some(event_sender) = self.event_sender.as_deref() { for peer in &removed_peers { - event_sender.send(Event::PeerRemoved { peer: *peer }).await; + event_sender + .send(Event::PeerRemoved { + info_hash: self.info_hash, + peer: *peer, + }) + .await; } 
} @@ -302,24 +326,25 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; #[test] fn it_should_be_empty_when_no_peers_have_been_inserted() { - let swarm = Swarm::default(); + let swarm = Swarm::new(&sample_info_hash(), 0, None); assert!(swarm.is_empty()); } #[test] fn it_should_have_zero_length_when_no_peers_have_been_inserted() { - let swarm = Swarm::default(); + let swarm = Swarm::new(&sample_info_hash(), 0, None); assert_eq!(swarm.len(), 0); } #[tokio::test] async fn it_should_allow_inserting_a_new_peer() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); @@ -329,7 +354,7 @@ mod tests { #[tokio::test] async fn it_should_allow_updating_a_preexisting_peer() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); @@ -344,7 +369,7 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_all_peers() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); @@ -356,7 +381,7 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_one_peer_by_id() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); @@ -368,7 +393,7 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); @@ -380,7 +405,7 @@ mod tests { #[tokio::test] async fn 
it_should_decrease_the_number_of_peers_after_removing_one() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); @@ -394,7 +419,7 @@ mod tests { #[tokio::test] async fn it_should_allow_removing_an_existing_peer() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); @@ -409,7 +434,7 @@ mod tests { #[tokio::test] async fn it_should_allow_removing_a_non_existing_peer() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -418,7 +443,7 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer1 = PeerBuilder::default() @@ -438,7 +463,7 @@ mod tests { #[tokio::test] async fn it_should_remove_inactive_peers() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let one_second = DurationSinceUnixEpoch::new(1, 0); @@ -455,7 +480,7 @@ mod tests { #[tokio::test] async fn it_should_not_remove_active_peers() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let one_second = DurationSinceUnixEpoch::new(1, 0); @@ -475,20 +500,21 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use crate::tests::sample_info_hash; use crate::Swarm; fn empty_swarm() -> Swarm { - Swarm::default() + Swarm::new(&sample_info_hash(), 0, None) } async fn not_empty_swarm() -> Swarm { - let mut swarm = Swarm::default(); + let mut swarm = 
Swarm::new(&sample_info_hash(), 0, None); swarm.upsert_peer(PeerBuilder::default().build().into(), &mut false).await; swarm } async fn not_empty_swarm_with_downloads() -> Swarm { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::leecher().build(); let mut downloads_increased = false; @@ -571,7 +597,7 @@ mod tests { #[tokio::test] async fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer1 = PeerBuilder::default() @@ -589,7 +615,7 @@ mod tests { #[tokio::test] async fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; // When that happens the peer ID will be changed in the swarm. 
@@ -612,7 +638,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_metadata() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); @@ -633,7 +659,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_number_of_seeders_in_the_list() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); @@ -649,7 +675,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_number_of_leechers_in_the_list() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); @@ -669,10 +695,11 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let leechers = swarm.metadata().leechers(); @@ -686,7 +713,7 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeders = swarm.metadata().seeders(); @@ -701,7 +728,7 @@ mod tests { #[tokio::test] async fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( ) { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let downloads = swarm.metadata().downloads(); @@ -718,10 +745,11 @@ mod tests { use 
torrust_tracker_primitives::peer::fixture::PeerBuilder; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); @@ -737,7 +765,7 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); @@ -758,10 +786,11 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); @@ -777,7 +806,7 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); @@ -797,10 +826,11 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); @@ -820,7 +850,7 @@ mod tests { #[tokio::test] async fn 
it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let mut peer = PeerBuilder::seeder().build(); @@ -840,7 +870,7 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); @@ -858,7 +888,7 @@ mod tests { #[tokio::test] async fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index c74fec3ea..3200d77ff 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -58,11 +58,10 @@ impl Swarms { ) -> Result { let swarm_handle = match self.swarms.get(info_hash) { None => { - let new_swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { - SwarmHandle::new(Swarm::new(number_of_downloads, self.event_sender.clone()).into()) - } else { - SwarmHandle::default() - }; + let number_of_downloads = opt_persistent_torrent.unwrap_or_default(); + + let new_swarm_handle = + SwarmHandle::new(Swarm::new(info_hash, number_of_downloads, self.event_sender.clone()).into()); let new_swarm_handle = self.swarms.get_or_insert(*info_hash, new_swarm_handle); @@ -330,7 +329,7 @@ impl Swarms { continue; } - let entry = SwarmHandle::new(Swarm::new(*completed, self.event_sender.clone()).into()); + let entry = SwarmHandle::new(Swarm::new(info_hash, *completed, 
self.event_sender.clone()).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. diff --git a/packages/torrent-repository/tests/swarm/mod.rs b/packages/torrent-repository/tests/swarm/mod.rs index f7ae4b439..cb4009ba9 100644 --- a/packages/torrent-repository/tests/swarm/mod.rs +++ b/packages/torrent-repository/tests/swarm/mod.rs @@ -3,6 +3,7 @@ use std::ops::Sub; use std::time::Duration; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use bittorrent_primitives::info_hash::InfoHash; use rstest::{fixture, rstest}; use torrust_tracker_clock::clock::stopped::Stopped as _; use torrust_tracker_clock::clock::{self, Time as _}; @@ -16,7 +17,7 @@ use crate::CurrentClock; #[fixture] fn swarm() -> Swarm { - Swarm::default() + Swarm::new(&InfoHash::default(), 0, None) } #[fixture] diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs index d8ee354c8..780d6cd4c 100644 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -14,6 +14,10 @@ use torrust_tracker_torrent_repository::Swarms; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; +fn swarm() -> Swarm { + Swarm::new(&InfoHash::default(), 0, None) +} + #[fixture] fn swarms() -> Swarms { Swarms::default() @@ -28,26 +32,26 @@ fn empty() -> Entries { #[fixture] fn default() -> Entries { - vec![(InfoHash::default(), Swarm::default())] + vec![(InfoHash::default(), swarm())] } #[fixture] async fn started() -> Entries { - let mut swarm = Swarm::default(); + let mut swarm = swarm(); swarm.handle_announcement(&a_started_peer(1)).await; vec![(InfoHash::default(), swarm)] } #[fixture] async fn completed() -> Entries { - let mut swarm = Swarm::default(); + let mut swarm = swarm(); swarm.handle_announcement(&a_completed_peer(2)).await; vec![(InfoHash::default(), swarm)] } #[fixture] async fn downloaded() -> Entries { - let mut swarm = 
Swarm::default(); + let mut swarm = swarm(); let mut peer = a_started_peer(3); swarm.handle_announcement(&peer).await; peer.event = AnnounceEvent::Completed; @@ -58,17 +62,17 @@ async fn downloaded() -> Entries { #[fixture] async fn three() -> Entries { - let mut started = Swarm::default(); + let mut started = swarm(); let started_h = &mut DefaultHasher::default(); started.handle_announcement(&a_started_peer(1)).await; started.hash(started_h); - let mut completed = Swarm::default(); + let mut completed = swarm(); let completed_h = &mut DefaultHasher::default(); completed.handle_announcement(&a_completed_peer(2)).await; completed.hash(completed_h); - let mut downloaded = Swarm::default(); + let mut downloaded = swarm(); let downloaded_h = &mut DefaultHasher::default(); let mut downloaded_peer = a_started_peer(3); downloaded.handle_announcement(&downloaded_peer).await; @@ -89,7 +93,7 @@ async fn many_out_of_order() -> Entries { let mut entries: HashSet<(InfoHash, Swarm)> = HashSet::default(); for i in 0..408 { - let mut entry = Swarm::default(); + let mut entry = swarm(); entry.handle_announcement(&a_started_peer(i)).await; entries.insert((InfoHash::from(&i), entry)); @@ -104,7 +108,7 @@ async fn many_hashed_in_order() -> Entries { let mut entries: BTreeMap = BTreeMap::default(); for i in 0..408 { - let mut entry = Swarm::default(); + let mut entry = swarm(); entry.handle_announcement(&a_started_peer(i)).await; let hash: &mut DefaultHasher = &mut DefaultHasher::default(); From dfba00c7c2fe641e486d2fbeb023cd099db3e567 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 13:15:14 +0100 Subject: [PATCH 053/247] feat: [#1358] allow disabling the event sender in the torrent-repository pkg --- .../src/environment.rs | 4 +- .../axum-http-tracker-server/src/server.rs | 6 ++- .../src/v1/handlers/announce.rs | 2 +- .../src/v1/handlers/scrape.rs | 2 +- .../src/environment.rs | 4 +- packages/events/src/bus.rs | 46 ++++++++++++++----- 
.../http-tracker-core/benches/helpers/util.rs | 2 +- packages/http-tracker-core/src/container.rs | 6 ++- .../src/services/announce.rs | 2 +- .../http-tracker-core/src/services/scrape.rs | 6 ++- .../src/statistics/services.rs | 2 +- .../rest-tracker-api-core/src/container.rs | 4 +- .../src/statistics/services.rs | 2 +- packages/torrent-repository/src/container.rs | 7 +-- .../udp-tracker-core/benches/helpers/sync.rs | 3 +- packages/udp-tracker-core/src/container.rs | 6 ++- .../udp-tracker-core/src/services/connect.rs | 7 +-- packages/udp-tracker-server/src/container.rs | 2 +- .../udp-tracker-server/src/environment.rs | 4 +- .../src/handlers/announce.rs | 14 ++++-- .../src/handlers/connect.rs | 22 ++++++--- .../udp-tracker-server/src/handlers/mod.rs | 8 +++- .../udp-tracker-server/src/handlers/scrape.rs | 3 +- src/container.rs | 4 +- 24 files changed, 118 insertions(+), 50 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 078bda9e5..10dada2db 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -144,7 +144,9 @@ impl EnvContainer { .expect("missing HTTP tracker configuration"); let http_tracker_config = Arc::new(http_tracker_config[0].clone()); - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + configuration.core.tracker_usage_statistics.into(), + )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 3904449fa..f7d1ed7ea 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -280,7 +280,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let 
http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - configuration.core.tracker_usage_statistics, + configuration.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); @@ -290,7 +290,9 @@ mod tests { let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + configuration.core.tracker_usage_statistics.into(), + )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 7489211a9..7d7a0b386 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -168,7 +168,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 330e7c13e..8decfe95c 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -139,7 +139,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs 
b/packages/axum-rest-tracker-api-server/src/environment.rs index e4a83d15d..92ca5a2d1 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -173,7 +173,9 @@ impl EnvContainer { .clone(), ); - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, diff --git a/packages/events/src/bus.rs b/packages/events/src/bus.rs index d53f29b8d..b42fb4fc5 100644 --- a/packages/events/src/bus.rs +++ b/packages/events/src/bus.rs @@ -3,36 +3,60 @@ use std::sync::Arc; use crate::broadcaster::Broadcaster; use crate::{receiver, sender}; +#[derive(Clone, Debug)] +pub enum SenderStatus { + Enabled, + Disabled, +} + +impl From for SenderStatus { + fn from(enabled: bool) -> Self { + if enabled { + Self::Enabled + } else { + Self::Disabled + } + } +} + +impl From for bool { + fn from(sender_status: SenderStatus) -> Self { + match sender_status { + SenderStatus::Enabled => true, + SenderStatus::Disabled => false, + } + } +} + #[derive(Clone, Debug)] pub struct EventBus { - pub enable_sender: bool, + pub sender_status: SenderStatus, pub broadcaster: Broadcaster, } impl Default for EventBus { fn default() -> Self { - let enable_sender = true; + let sender_status = SenderStatus::Enabled; let broadcaster = Broadcaster::::default(); - Self::new(enable_sender, broadcaster) + Self::new(sender_status, broadcaster) } } impl EventBus { #[must_use] - pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { + pub fn new(sender_status: SenderStatus, broadcaster: Broadcaster) -> Self { Self { - enable_sender, + sender_status, broadcaster, } } #[must_use] pub fn sender(&self) -> Option>> { - if self.enable_sender { - Some(Arc::new(self.broadcaster.clone())) 
- } else { - None + match self.sender_status { + SenderStatus::Enabled => Some(Arc::new(self.broadcaster.clone())), + SenderStatus::Disabled => None, } } @@ -50,14 +74,14 @@ mod tests { #[tokio::test] async fn it_should_provide_an_event_sender_when_enabled() { - let bus = EventBus::::new(true, Broadcaster::default()); + let bus = EventBus::::new(SenderStatus::Enabled, Broadcaster::default()); assert!(bus.sender().is_some()); } #[tokio::test] async fn it_should_not_provide_event_sender_when_disabled() { - let bus = EventBus::::new(false, Broadcaster::default()); + let bus = EventBus::::new(SenderStatus::Disabled, Broadcaster::default()); assert!(bus.sender().is_none()); } diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 7ee91a2c4..cfb3f745f 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -62,7 +62,7 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 922273610..f063c0061 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -27,7 +27,9 @@ pub struct HttpTrackerCoreContainer { impl HttpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, http_tracker_config: &Arc) -> Arc { - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); let tracker_core_container = 
Arc::new(TrackerCoreContainer::initialize_from( core_config, @@ -80,7 +82,7 @@ impl HttpTrackerCoreServices { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - tracker_core_container.core_config.tracker_usage_statistics, + tracker_core_container.core_config.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index e0f387273..9f39a04e4 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -256,7 +256,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 70e30099c..3da1aa88f 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -255,6 +255,7 @@ mod tests { use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ClientIpSources, RemoteClientAddr, ResolvedIp}; use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; @@ -276,7 +277,7 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); - let http_stats_event_bus = Arc::new(EventBus::new(false, http_core_broadcaster.clone())); + let http_stats_event_bus = 
Arc::new(EventBus::new(SenderStatus::Disabled, http_core_broadcaster.clone())); let http_stats_event_sender = http_stats_event_bus.sender(); @@ -446,6 +447,7 @@ mod tests { use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ClientIpSources, RemoteClientAddr, ResolvedIp}; use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use torrust_tracker_test_helpers::configuration; @@ -468,7 +470,7 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); - let http_stats_event_bus = Arc::new(EventBus::new(false, http_core_broadcaster.clone())); + let http_stats_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, http_core_broadcaster.clone())); let http_stats_event_sender = http_stats_event_bus.sender(); diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 3c8a4fa43..af1e30524 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -96,7 +96,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index e9a622e04..1c4a08e26 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -36,7 +36,9 @@ impl TrackerHttpApiCoreContainer { udp_tracker_config: &Arc, http_api_config: &Arc, ) -> Arc { - let torrent_repository_container = 
Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( core_config, diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index aad31a323..d05a35981 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -160,7 +160,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/torrent-repository/src/container.rs b/packages/torrent-repository/src/container.rs index 50a6b8b9c..d185180b1 100644 --- a/packages/torrent-repository/src/container.rs +++ b/packages/torrent-repository/src/container.rs @@ -1,5 +1,7 @@ use std::sync::Arc; +use torrust_tracker_events::bus::SenderStatus; + use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::event::{self}; @@ -15,13 +17,12 @@ pub struct TorrentRepositoryContainer { impl TorrentRepositoryContainer { #[must_use] - pub fn initialize() -> Self { + pub fn initialize(sender_status: SenderStatus) -> Self { // Torrent repository stats let broadcaster = Broadcaster::default(); let stats_repository = Arc::new(Repository::new()); - // todo: add a config option to enable/disable stats for this package - let event_bus = Arc::new(EventBus::new(true, broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(sender_status, broadcaster.clone())); let stats_event_sender = event_bus.sender(); diff --git a/packages/udp-tracker-core/benches/helpers/sync.rs 
b/packages/udp-tracker-core/benches/helpers/sync.rs index 1814a865e..e8ec1ce03 100644 --- a/packages/udp-tracker-core/benches/helpers/sync.rs +++ b/packages/udp-tracker-core/benches/helpers/sync.rs @@ -5,6 +5,7 @@ use std::time::{Duration, Instant}; use bittorrent_udp_tracker_core::event::bus::EventBus; use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::connect::ConnectService; +use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::helpers::utils::{sample_ipv4_remote_addr, sample_issue_time}; @@ -16,7 +17,7 @@ pub async fn connect_once(samples: u64) -> Duration { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 2b6567ec0..07a8a09ef 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -32,7 +32,9 @@ pub struct UdpTrackerCoreContainer { impl UdpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, udp_tracker_config: &Arc) -> Arc { - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( core_config, @@ -91,7 +93,7 @@ impl UdpTrackerCoreServices { let udp_core_broadcaster = Broadcaster::default(); let 
udp_core_stats_repository = Arc::new(Repository::new()); let event_bus = Arc::new(EventBus::new( - tracker_core_container.core_config.tracker_usage_statistics, + tracker_core_container.core_config.tracker_usage_statistics.into(), udp_core_broadcaster.clone(), )); diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 18c9fd0ba..6ba36f274 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -61,6 +61,7 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::connection_cookie::make; @@ -79,7 +80,7 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -100,7 +101,7 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -122,7 +123,7 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); 
+ let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index a0bc8f35b..365db4ca7 100644 --- a/packages/udp-tracker-server/src/container.rs +++ b/packages/udp-tracker-server/src/container.rs @@ -39,7 +39,7 @@ impl UdpTrackerServerServices { let udp_server_broadcaster = Broadcaster::default(); let udp_server_stats_repository = Arc::new(Repository::new()); let udp_server_stats_event_bus = Arc::new(EventBus::new( - core_config.tracker_usage_statistics, + core_config.tracker_usage_statistics.into(), udp_server_broadcaster.clone(), )); diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 6dae3d860..f92d5dd29 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -175,7 +175,9 @@ impl EnvContainer { let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 86e7888f2..65b521f27 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -206,6 +206,7 @@ mod tests { use 
bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event, UdpRequestKind}; @@ -378,7 +379,10 @@ mod tests { core_udp_tracker_services: Arc, ) -> Response { let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = event_bus.sender(); @@ -542,6 +546,7 @@ mod tests { use bittorrent_udp_tracker_core::services::announce::AnnounceService; use mockall::predicate::eq; use torrust_tracker_configuration::Core; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event, UdpRequestKind}; @@ -718,11 +723,14 @@ mod tests { whitelist_authorization: Arc, ) -> Response { let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); diff --git 
a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 1244a6a3b..961189945 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -63,6 +63,7 @@ mod tests { use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::connect::ConnectService; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event, UdpRequestKind}; @@ -84,11 +85,14 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); @@ -123,11 +127,14 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = 
Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); @@ -162,12 +169,15 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index d39ad0972..ca834c006 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -227,6 +227,7 @@ pub(crate) mod tests { use mockall::mock; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, Core}; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_events::sender::SendError; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; @@ -287,11 +288,14 @@ pub(crate) mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + 
let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 34d5a5ce2..e35e118b4 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -92,6 +92,7 @@ mod tests { }; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::bus::EventBus; @@ -183,7 +184,7 @@ mod tests { core_udp_tracker_services: Arc, ) -> Response { let udp_server_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_server_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = event_bus.sender(); diff --git a/src/container.rs b/src/container.rs index 273425fc1..98c455780 100644 --- a/src/container.rs +++ b/src/container.rs @@ -60,7 +60,9 @@ impl AppContainer { // Torrent Repository - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); // Core From 
8ee258eee3f16aeceb6166185b71325263dd0ff8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 14:11:37 +0100 Subject: [PATCH 054/247] refactor: [#1358] use the new field info-hash as ID for the Swarm (Hash,PartialEq) --- packages/torrent-repository/src/swarm.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 3fe0e27d7..2ad216a61 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -36,14 +36,13 @@ impl Debug for Swarm { impl Hash for Swarm { fn hash(&self, state: &mut H) { - self.peers.hash(state); - self.metadata.hash(state); + self.info_hash.hash(state); } } impl PartialEq for Swarm { fn eq(&self, other: &Self) -> bool { - self.peers == other.peers && self.metadata == other.metadata + self.info_hash == other.info_hash } } From c9a893c876546562c484131acba77034249b5008 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 14:12:01 +0100 Subject: [PATCH 055/247] refactor: [#1358] rename metrics for clarity There are two concepts: - Unique peers: physical client with different socket address. - Peer connections: a client (peer) can participate in multiple swarms. Current metrics count the second, meaning the peer would be counted twice if it participates in two swarms.
--- .../src/statistics/event/handler.rs | 18 +++++++++++++----- .../torrent-repository/src/statistics/mod.rs | 17 +++++++++++++---- 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index d2783f9ba..2fd7271cc 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -8,7 +8,7 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; use crate::statistics::{ - TORRENT_REPOSITORY_PEERS_TOTAL, TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, + TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL, TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, }; pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { @@ -31,14 +31,22 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer added", ); let _unused = stats_repository - .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) + .increment_gauge( + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + &label_set_for_peer(&peer), + now, + ) .await; } Event::PeerRemoved { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer removed", ); let _unused = stats_repository - .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) + .decrement_gauge( + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + &label_set_for_peer(&peer), + now, + ) .await; } Event::PeerUpdated { @@ -51,7 +59,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: if old_peer.role() != new_peer.role() { let _unused = stats_repository .increment_gauge( - 
&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), &label_set_for_peer(&new_peer), now, ) @@ -59,7 +67,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: let _unused = stats_repository .decrement_gauge( - &metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), &label_set_for_peer(&old_peer), now, ) diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index 4deaf19cb..18dcf83ea 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -14,7 +14,8 @@ const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_to // Peers metrics -const TORRENT_REPOSITORY_PEERS_TOTAL: &str = "torrent_repository_peers_total"; +const TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL: &str = "torrent_repository_peer_connections_total"; +const TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL: &str = "torrent_repository_unique_peers_total"; // todo: not implemented yet #[must_use] pub fn describe_metrics() -> Metrics { @@ -32,16 +33,24 @@ pub fn describe_metrics() -> Metrics { &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), Some(Unit::Count), Some(&MetricDescription::new( - "The total number of torrent downloads since the tracker process started.", + "The total number of torrent downloads (since the tracker process started).", )), ); // Peers metrics metrics.metric_collection.describe_gauge( - &metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of peers.")), + Some(&MetricDescription::new( + "The total number of peer connections (one connection per torrent).", + )), + ); + + metrics.metric_collection.describe_gauge( + &metric_name!(TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL), + Some(Unit::Count), + 
Some(&MetricDescription::new("The total number of unique peers.")), ); metrics From 0e38707fda29f54ae8cad8e2d19b737c97d77843 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 15:43:01 +0100 Subject: [PATCH 056/247] fix: [#1358] revert Hash impl for Swarm To fix broken tests. This implementation will be kept for now. I think it's only used for testing and I'm planning to remove all integration tests because now we have unit tests covering the same functionality. --- packages/torrent-repository/src/swarm.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 2ad216a61..3fe0e27d7 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -36,13 +36,14 @@ impl Debug for Swarm { impl Hash for Swarm { fn hash(&self, state: &mut H) { - self.info_hash.hash(state); + self.peers.hash(state); + self.metadata.hash(state); } } impl PartialEq for Swarm { fn eq(&self, other: &Self) -> bool { - self.info_hash == other.info_hash + self.peers == other.peers && self.metadata == other.metadata } } From 3d7e6ff04ab94f576b8aedd6663756243c6f3e55 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 17:03:02 +0100 Subject: [PATCH 057/247] test: [#1358] add tests to torrust_tracker_torrent_repository::swarm::Swarm --- Cargo.lock | 1 + packages/primitives/src/peer.rs | 18 +++ packages/torrent-repository/Cargo.toml | 1 + packages/torrent-repository/src/event.rs | 20 +++ packages/torrent-repository/src/swarm.rs | 189 +++++++++++++++++++++++ 5 files changed, 229 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index ddf163cc6..75a272292 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4858,6 +4858,7 @@ dependencies = [ "criterion", "crossbeam-skiplist", "futures", + "mockall", "rand 0.9.1", "rstest", "serde", diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index 316541ad6..cd4531b09 
100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -252,6 +252,18 @@ impl Peer { pub fn change_ip(&mut self, new_ip: &IpAddr) { self.peer_addr = SocketAddr::new(*new_ip, self.peer_addr.port()); } + + pub fn mark_as_completed(&mut self) { + self.event = AnnounceEvent::Completed; + } + + #[must_use] + pub fn into_completed(self) -> Self { + Self { + event: AnnounceEvent::Completed, + ..self + } + } } use std::panic::Location; @@ -520,6 +532,12 @@ pub mod fixture { self } + #[must_use] + pub fn with_event(mut self, event: AnnounceEvent) -> Self { + self.peer.event = event; + self + } + #[allow(dead_code)] #[must_use] pub fn build(self) -> Peer { diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 26662b583..98ae5817d 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -33,6 +33,7 @@ tracing = "0" [dev-dependencies] async-std = { version = "1", features = ["attributes", "tokio1"] } criterion = { version = "0", features = ["async_tokio"] } +mockall = "0" rand = "0" rstest = "0" torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs index ac1c06637..9709da19a 100644 --- a/packages/torrent-repository/src/event.rs +++ b/packages/torrent-repository/src/event.rs @@ -36,6 +36,26 @@ pub mod sender { pub type Sender = Option>>; pub type Broadcaster = torrust_tracker_events::broadcaster::Broadcaster; + + #[cfg(test)] + pub mod tests { + + use futures::future::BoxFuture; + use mockall::mock; + use torrust_tracker_events::sender::{SendError, Sender}; + + use crate::event::Event; + + mock! 
{ + pub EventSender {} + + impl Sender for EventSender { + type Event = Event; + + fn send(&self, event: Event) -> BoxFuture<'static,Option > > > ; + } + } + } } pub mod receiver { diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 3fe0e27d7..473703e89 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -328,6 +328,16 @@ mod tests { use crate::swarm::Swarm; use crate::tests::sample_info_hash; + #[test] + fn it_should_allow_debugging() { + let swarm = Swarm::new(&sample_info_hash(), 0, None); + + assert_eq!( + format!("{swarm:?}"), + "Swarm { peers: {}, metadata: SwarmMetadata { downloaded: 0, complete: 0, incomplete: 0 } }" + ); + } + #[test] fn it_should_be_empty_when_no_peers_have_been_inserted() { let swarm = Swarm::new(&sample_info_hash(), 0, None); @@ -689,6 +699,12 @@ mod tests { assert_eq!(leechers, 1); } + #[tokio::test] + async fn it_should_be_a_peerless_swarm_when_it_does_not_contain_any_peers() { + let swarm = Swarm::new(&sample_info_hash(), 0, None); + assert!(swarm.is_peerless()); + } + mod updating_the_swarm_metadata { mod when_a_new_peer_is_added { @@ -907,4 +923,177 @@ mod tests { } } } + + mod triggering_events { + + use std::future; + use std::sync::Arc; + + use aquatic_udp_protocol::AnnounceEvent::Started; + use mockall::predicate::eq; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::event::sender::tests::MockEventSender; + use crate::event::Event; + use crate::swarm::Swarm; + use crate::tests::sample_info_hash; + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_new_peer_is_added() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerAdded { info_hash, peer })) + .times(1) + .returning(|_| 
Box::pin(future::ready(Some(Ok(1))))); + + let mut swarm = Swarm::new(&sample_info_hash(), 0, Some(Arc::new(event_sender_mock))); + + let mut downloads_increased = false; + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_is_directly_removed() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerAdded { info_hash, peer })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerRemoved { info_hash, peer })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + let mut downloads_increased = false; + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + + swarm.remove(&peer).await; + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_is_removed_due_to_inactivity() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerAdded { info_hash, peer })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerRemoved { info_hash, peer })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + let mut downloads_increased = false; + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + + // Peers not updated after this time will be removed + let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); + + swarm.remove_inactive(current_cutoff).await; + } + + 
#[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_is_updated() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().with_event(Started).build(); + + let mut event_sender_mock = MockEventSender::new(); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerAdded { info_hash, peer })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerUpdated { + info_hash, + old_peer: peer, + new_peer: peer, + })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + let mut downloads_increased = false; + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + + // Update the peer + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_completes_a_download() { + let info_hash = sample_info_hash(); + let started_peer = PeerBuilder::leecher().with_event(Started).build(); + let completed_peer = started_peer.into_completed(); + + let mut event_sender_mock = MockEventSender::new(); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerAdded { + info_hash, + peer: started_peer, + })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerUpdated { + info_hash, + old_peer: started_peer, + new_peer: completed_peer, + })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerDownloadCompleted { + info_hash, + peer: completed_peer, + })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + let mut downloads_increased = false; + swarm.upsert_peer(started_peer.into(), &mut 
downloads_increased).await; + + // Announce as completed + swarm.upsert_peer(completed_peer.into(), &mut downloads_increased).await; + } + } } From f71211fedc91477058064150398b265705f6fdf0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 17:53:02 +0100 Subject: [PATCH 058/247] test: [#1358] add tests to torrust_tracker_torrent_repository::swarms::Swarms --- packages/torrent-repository/src/event.rs | 16 +- packages/torrent-repository/src/swarm.rs | 2 +- packages/torrent-repository/src/swarms.rs | 247 +++++++++++++++++++++- 3 files changed, 259 insertions(+), 6 deletions(-) diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs index 9709da19a..da086f89e 100644 --- a/packages/torrent-repository/src/event.rs +++ b/packages/torrent-repository/src/event.rs @@ -40,8 +40,9 @@ pub mod sender { #[cfg(test)] pub mod tests { - use futures::future::BoxFuture; + use futures::future::{self, BoxFuture}; use mockall::mock; + use mockall::predicate::eq; use torrust_tracker_events::sender::{SendError, Sender}; use crate::event::Event; @@ -55,6 +56,19 @@ pub mod sender { fn send(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } + + pub fn expect_event(mock: &mut MockEventSender, event: Event) { + mock.expect_send() + .with(eq(event)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + } + + pub fn expect_event_sequence(mock: &mut MockEventSender, event: Vec) { + for e in event { + expect_event(mock, e); + } + } } } diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 473703e89..160636906 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -647,7 +647,7 @@ mod tests { } #[tokio::test] - async fn it_should_return_the_metadata() { + async fn it_should_return_the_swarm_metadata() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; diff --git 
a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 3200d77ff..8b8327778 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -440,8 +440,13 @@ mod tests { mod the_swarm_repository { + use std::sync::Arc; + use aquatic_udp_protocol::PeerId; + use crate::swarms::Swarms; + use crate::tests::{sample_info_hash, sample_peer}; + /// It generates a peer id from a number where the number is the last /// part of the peer ID. For example, for `12` it returns /// `-qB00000000000000012`. @@ -462,14 +467,50 @@ mod tests { // The `TorrentRepository` has these responsibilities: // - To maintain the peer lists for each torrent. - // - To maintain the the torrent entries, which contains all the info about the - // torrents, including the peer lists. - // - To return the torrent entries. + // - To maintain the the torrent entries, which contains all the info + // about the torrents, including the peer lists. + // - To return the torrent entries (swarm handles). // - To return the peer lists for a given torrent. // - To return the torrent metrics. // - To return the swarm metadata for a given torrent. // - To handle the persistence of the torrent entries. 
+ #[tokio::test] + async fn it_should_return_zero_length_when_it_has_no_swarms() { + let swarms = Arc::new(Swarms::default()); + assert_eq!(swarms.len(), 0); + } + + #[tokio::test] + async fn it_should_return_the_length_when_it_has_swarms() { + let swarms = Arc::new(Swarms::default()); + let info_hash = sample_info_hash(); + let peer = sample_peer(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + assert_eq!(swarms.len(), 1); + } + + #[tokio::test] + async fn it_should_be_empty_when_it_has_no_swarms() { + let swarms = Arc::new(Swarms::default()); + assert!(swarms.is_empty()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + assert!(!swarms.is_empty()); + } + + #[tokio::test] + async fn it_should_not_be_empty_when_it_has_at_least_one_swarm() { + let swarms = Arc::new(Swarms::default()); + let info_hash = sample_info_hash(); + let peer = sample_peer(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + assert!(!swarms.is_empty()); + } + mod maintaining_the_peer_lists { use std::sync::Arc; @@ -1054,6 +1095,59 @@ mod tests { "{result_a:?} {result_b:?}" ); } + + mod it_should_count_peerless_torrents { + use std::sync::Arc; + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::swarms::Swarms; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn no_peerless_torrents() { + let swarms = Arc::new(Swarms::default()); + assert_eq!(swarms.count_peerless_torrents().await.unwrap(), 0); + } + + #[tokio::test] + async fn one_peerless_torrents() { + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + let swarms = Arc::new(Swarms::default()); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); + swarms.remove_inactive_peers(current_cutoff).await.unwrap(); + + 
assert_eq!(swarms.count_peerless_torrents().await.unwrap(), 1); + } + } + + mod it_should_count_peers { + use std::sync::Arc; + + use crate::swarms::Swarms; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn no_peers() { + let swarms = Arc::new(Swarms::default()); + assert_eq!(swarms.count_peers().await.unwrap(), 0); + } + + #[tokio::test] + async fn one_peer() { + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + let swarms = Arc::new(Swarms::default()); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + assert_eq!(swarms.count_peers().await.unwrap(), 1); + } + } } mod returning_swarm_metadata { @@ -1102,7 +1196,7 @@ mod tests { use torrust_tracker_primitives::PersistentTorrents; use crate::swarms::Swarms; - use crate::tests::sample_info_hash; + use crate::tests::{leecher, sample_info_hash}; #[tokio::test] async fn it_should_allow_importing_persisted_torrent_entries() { @@ -1121,6 +1215,151 @@ mod tests { // Only the number of downloads is persisted. assert_eq!(swarm_metadata.downloaded, 1); } + + #[tokio::test] + async fn it_should_allow_overwriting_a_previously_imported_persisted_torrent() { + // code-review: do we want to allow this? 
+ + let swarms = Arc::new(Swarms::default()); + + let infohash = sample_info_hash(); + + let mut persistent_torrents = PersistentTorrents::default(); + + persistent_torrents.insert(infohash, 1); + persistent_torrents.insert(infohash, 2); + + swarms.import_persistent(&persistent_torrents); + + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap(); + + // It takes the last value + assert_eq!(swarm_metadata.downloaded, 2); + } + + #[tokio::test] + async fn it_should_now_allow_importing_a_persisted_torrent_if_it_already_exists() { + let swarms = Arc::new(Swarms::default()); + + let infohash = sample_info_hash(); + + // Insert a new the torrent entry + swarms.handle_announcement(&infohash, &leecher(), None).await.unwrap(); + let initial_number_of_downloads = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap().downloaded; + + // Try to import the torrent entry + let new_number_of_downloads = initial_number_of_downloads + 1; + let mut persistent_torrents = PersistentTorrents::default(); + persistent_torrents.insert(infohash, new_number_of_downloads); + swarms.import_persistent(&persistent_torrents); + + // The number of downloads should not be changed + assert_eq!( + swarms.get_swarm_metadata_or_default(&infohash).await.unwrap().downloaded, + initial_number_of_downloads + ); + } + } + } + + mod triggering_events { + + use std::sync::Arc; + + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; + use crate::event::Event; + use crate::swarms::Swarms; + use crate::tests::sample_info_hash; + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_new_torrent_is_added() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::TorrentAdded 
{ + info_hash, + announcement: peer, + }, + Event::PeerAdded { info_hash, peer }, + ], + ); + + let swarms = Swarms::new(Some(Arc::new(event_sender_mock))); + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_torrent_is_directly_removed() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::TorrentAdded { + info_hash, + announcement: peer, + }, + Event::PeerAdded { info_hash, peer }, + Event::TorrentRemoved { info_hash }, + ], + ); + + let swarms = Swarms::new(Some(Arc::new(event_sender_mock))); + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + swarms.remove(&info_hash).await.unwrap(); + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peerless_torrent_is_removed() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::TorrentAdded { + info_hash, + announcement: peer, + }, + Event::PeerAdded { info_hash, peer }, + Event::PeerRemoved { info_hash, peer }, + Event::TorrentRemoved { info_hash }, + ], + ); + + let swarms = Swarms::new(Some(Arc::new(event_sender_mock))); + + // Add the new torrent + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + // Remove the peer + let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); + swarms.remove_inactive_peers(current_cutoff).await.unwrap(); + + // Remove peerless torrents + + let tracker_policy = torrust_tracker_configuration::TrackerPolicy { + remove_peerless_torrents: true, + ..Default::default() + }; + + swarms.remove_peerless_torrents(&tracker_policy).await.unwrap(); } } } From b13797e768ea79fd071a47dd6cbb710f11a22a21 Mon Sep 17 00:00:00 2001 
From: Jose Celano Date: Wed, 14 May 2025 19:33:26 +0100 Subject: [PATCH 059/247] test: [#1358] add tests for events in torrent-repository pkg --- packages/primitives/src/peer.rs | 46 ++- packages/torrent-repository/src/event.rs | 26 ++ .../src/statistics/event/handler.rs | 336 ++++++++++++++++++ 3 files changed, 406 insertions(+), 2 deletions(-) diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index cd4531b09..20ddd3074 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -22,12 +22,13 @@ //! }; //! ``` +use std::fmt; use std::net::{IpAddr, SocketAddr}; use std::ops::{Deref, DerefMut}; +use std::str::FromStr; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use derive_more::Display; use serde::Serialize; use zerocopy::FromBytes as _; @@ -35,7 +36,7 @@ use crate::DurationSinceUnixEpoch; pub type PeerAnnouncement = Peer; -#[derive(Debug, Display, Serialize, Copy, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Serialize, Copy, Clone, PartialEq, Eq, Hash)] #[serde(rename_all_fields = "lowercase")] pub enum PeerRole { Seeder, @@ -53,6 +54,39 @@ impl PeerRole { } } +impl fmt::Display for PeerRole { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PeerRole::Seeder => write!(f, "seeder"), + PeerRole::Leecher => write!(f, "leecher"), + } + } +} + +impl FromStr for PeerRole { + type Err = ParsePeerRoleError; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "seeder" => Ok(PeerRole::Seeder), + "leecher" => Ok(PeerRole::Leecher), + _ => Err(ParsePeerRoleError::InvalidPeerRole { + location: Location::caller(), + raw_param: s.to_string(), + }), + } + } +} + +#[derive(Error, Debug)] +pub enum ParsePeerRoleError { + #[error("invalid param {raw_param} in {location}")] + InvalidPeerRole { + location: &'static Location<'static>, + raw_param: String, + }, +} + /// Peer struct used by the core `Tracker`. 
/// /// A sample peer: @@ -264,6 +298,14 @@ impl Peer { ..self } } + + #[must_use] + pub fn into_seeder(self) -> Self { + Self { + left: NumberOfBytes::new(0), + ..self + } + } } use std::panic::Location; diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs index da086f89e..65a65ce8c 100644 --- a/packages/torrent-repository/src/event.rs +++ b/packages/torrent-repository/src/event.rs @@ -83,3 +83,29 @@ pub mod bus { pub type EventBus = torrust_tracker_events::bus::EventBus; } + +#[cfg(test)] +pub mod test { + + use torrust_tracker_primitives::peer::Peer; + + use super::Event; + use crate::tests::sample_info_hash; + + #[test] + fn events_should_be_comparable() { + let info_hash = sample_info_hash(); + + let event1 = Event::TorrentAdded { + info_hash, + announcement: Peer::default(), + }; + + let event2 = Event::TorrentRemoved { info_hash }; + + let event1_clone = event1.clone(); + + assert!(event1 == event1_clone); + assert!(event1 != event2); + } +} diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index 2fd7271cc..2b61839b8 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -13,6 +13,7 @@ use crate::statistics::{ pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { match event { + // Torrent events Event::TorrentAdded { info_hash, .. 
} => { tracing::debug!(info_hash = ?info_hash, "Torrent added",); @@ -27,6 +28,8 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) .await; } + + // Peer events Event::PeerAdded { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer added", ); @@ -96,3 +99,336 @@ fn label_set_for_peer(peer: &Peer) -> LabelSet { (label_name!("peer_role"), LabelValue::new("leecher")).into() } } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use aquatic_udp_protocol::NumberOfBytes; + use torrust_tracker_metrics::label::LabelSet; + use torrust_tracker_metrics::metric::MetricName; + use torrust_tracker_primitives::peer::{Peer, PeerRole}; + + use crate::statistics::repository::Repository; + use crate::tests::{leecher, seeder}; + + fn make_peer(role: PeerRole) -> Peer { + match role { + PeerRole::Seeder => seeder(), + PeerRole::Leecher => leecher(), + } + } + + // It returns a peer with the opposite role of the given peer. 
+ fn make_opposite_role_peer(peer: &Peer) -> Peer { + let mut opposite_role_peer = *peer; + + match peer.role() { + PeerRole::Seeder => { + opposite_role_peer.left = NumberOfBytes::new(1); + } + PeerRole::Leecher => { + opposite_role_peer.left = NumberOfBytes::new(0); + } + } + + opposite_role_peer + } + + async fn expect_counter_metric_to_be( + stats_repository: &Arc, + metric_name: &MetricName, + label_set: &LabelSet, + expected_value: u64, + ) { + let value = get_counter_metric(stats_repository, metric_name, label_set).await; + assert_eq!(value.to_string(), expected_value.to_string()); + } + + async fn get_counter_metric(stats_repository: &Arc, metric_name: &MetricName, label_set: &LabelSet) -> u64 { + stats_repository + .get_metrics() + .await + .metric_collection + .get_counter_value(metric_name, label_set) + .unwrap_or_else(|| panic!("Failed to get counter value for metric name '{metric_name}' and label set '{label_set}'")) + .value() + } + + async fn expect_gauge_metric_to_be( + stats_repository: &Arc, + metric_name: &MetricName, + label_set: &LabelSet, + expected_value: f64, + ) { + let value = get_gauge_metric(stats_repository, metric_name, label_set).await; + assert_eq!(value.to_string(), expected_value.to_string()); + } + + async fn get_gauge_metric(stats_repository: &Arc, metric_name: &MetricName, label_set: &LabelSet) -> f64 { + stats_repository + .get_metrics() + .await + .metric_collection + .get_gauge_value(metric_name, label_set) + .unwrap_or_else(|| panic!("Failed to get gauge value for metric name '{metric_name}' and label set '{label_set}'")) + .value() + } + + mod for_torrent_metrics { + + use std::sync::Arc; + + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::label::LabelSet; + use torrust_tracker_metrics::metric_name; + + use crate::event::Event; + use crate::statistics::event::handler::handle_event; + use 
crate::statistics::event::handler::tests::expect_gauge_metric_to_be; + use crate::statistics::repository::Repository; + use crate::statistics::TORRENT_REPOSITORY_TORRENTS_TOTAL; + use crate::tests::{sample_info_hash, sample_peer}; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_increment_the_number_of_torrents_when_a_torrent_added_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + handle_event( + Event::TorrentAdded { + info_hash: sample_info_hash(), + announcement: sample_peer(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be( + &stats_repository, + &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), + &LabelSet::default(), + 1.0, + ) + .await; + } + + #[tokio::test] + async fn it_should_decrement_the_number_of_torrents_when_a_torrent_removed_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + let metric_name = metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL); + let label_set = LabelSet::default(); + + // Increment the gauge first to simulate a torrent being added. 
+ stats_repository + .increment_gauge(&metric_name, &label_set, CurrentClock::now()) + .await + .unwrap(); + + handle_event( + Event::TorrentRemoved { + info_hash: sample_info_hash(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be(&stats_repository, &metric_name, &label_set, 0.0).await; + } + } + + mod for_peer_metrics { + + mod peer_connections_total { + + use std::sync::Arc; + + use rstest::rstest; + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::label::LabelValue; + use torrust_tracker_metrics::{label_name, metric_name}; + use torrust_tracker_primitives::peer::PeerRole; + + use crate::event::Event; + use crate::statistics::event::handler::handle_event; + use crate::statistics::event::handler::tests::{ + expect_gauge_metric_to_be, get_gauge_metric, make_opposite_role_peer, make_peer, + }; + use crate::statistics::repository::Repository; + use crate::statistics::TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL; + use crate::tests::sample_info_hash; + use crate::CurrentClock; + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn it_should_increment_the_number_of_peer_connections_when_a_peer_added_event_is_received( + #[case] role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let peer = make_peer(role); + + let stats_repository = Arc::new(Repository::new()); + let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); + + handle_event( + Event::PeerAdded { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be(&stats_repository, &metric_name, &label_set, 1.0).await; + } + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn 
it_should_decrement_the_number_of_peer_connections_when_a_peer_removed_event_is_received( + #[case] role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let peer = make_peer(role); + + let stats_repository = Arc::new(Repository::new()); + + let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); + + // Increment the gauge first to simulate a peer being added. + stats_repository + .increment_gauge(&metric_name, &label_set, CurrentClock::now()) + .await + .unwrap(); + + handle_event( + Event::PeerRemoved { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be(&stats_repository, &metric_name, &label_set, 0.0).await; + } + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn it_should_adjust_the_number_of_seeders_and_leechers_when_a_peer_updated_event_is_received_and_the_peer_changed_its_role( + #[case] old_role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + let old_peer = make_peer(old_role); + let new_peer = make_opposite_role_peer(&old_peer); + + let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let old_role_label_set = (label_name!("peer_role"), LabelValue::new(&old_peer.role().to_string())).into(); + let new_role_label_set = (label_name!("peer_role"), LabelValue::new(&new_peer.role().to_string())).into(); + + // Increment the gauge first by simulating a peer was added. 
+ handle_event( + Event::PeerAdded { + info_hash: sample_info_hash(), + peer: old_peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let old_role_total = get_gauge_metric(&stats_repository, &metric_name, &old_role_label_set).await; + let new_role_total = 0.0; + + // The peer's role has changed, so we need to increment the new + // role and decrement the old one. + handle_event( + Event::PeerUpdated { + info_hash: sample_info_hash(), + old_peer, + new_peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + // The peer's role has changed, so the new role has incremented. + expect_gauge_metric_to_be(&stats_repository, &metric_name, &new_role_label_set, new_role_total + 1.0).await; + + // And the old role has decremented. + expect_gauge_metric_to_be(&stats_repository, &metric_name, &old_role_label_set, old_role_total - 1.0).await; + } + } + + mod torrent_downloads_total { + + use std::sync::Arc; + + use rstest::rstest; + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::label::LabelValue; + use torrust_tracker_metrics::{label_name, metric_name}; + use torrust_tracker_primitives::peer::PeerRole; + + use crate::event::Event; + use crate::statistics::event::handler::handle_event; + use crate::statistics::event::handler::tests::{expect_counter_metric_to_be, make_peer}; + use crate::statistics::repository::Repository; + use crate::statistics::TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL; + use crate::tests::sample_info_hash; + use crate::CurrentClock; + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn it_should_increment_the_number_of_downloads_when_a_peer_downloaded_event_is_received( + #[case] role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let peer = make_peer(role); + + let stats_repository = Arc::new(Repository::new()); + let metric_name = 
metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL); + let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); + + handle_event( + Event::PeerDownloadCompleted { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be(&stats_repository, &metric_name, &label_set, 1).await; + } + } + } +} From 47d1eab5a7328b8a524d9bbcabc3b3bc4ddce6b5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 15 May 2025 19:09:43 +0100 Subject: [PATCH 060/247] refactor: [#1358] Swarm tests to use new mock helpers --- packages/torrent-repository/src/swarm.rs | 111 ++++++++--------------- 1 file changed, 39 insertions(+), 72 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 160636906..3277cad8d 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -926,15 +926,13 @@ mod tests { mod triggering_events { - use std::future; use std::sync::Arc; use aquatic_udp_protocol::AnnounceEvent::Started; - use mockall::predicate::eq; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::event::sender::tests::MockEventSender; + use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; use crate::event::Event; use crate::swarm::Swarm; use crate::tests::sample_info_hash; @@ -946,11 +944,7 @@ mod tests { let mut event_sender_mock = MockEventSender::new(); - event_sender_mock - .expect_send() - .with(eq(Event::PeerAdded { info_hash, peer })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + expect_event_sequence(&mut event_sender_mock, vec![Event::PeerAdded { info_hash, peer }]); let mut swarm = Swarm::new(&sample_info_hash(), 0, Some(Arc::new(event_sender_mock))); @@ -965,17 +959,10 @@ mod tests { let mut event_sender_mock = MockEventSender::new(); - event_sender_mock - 
.expect_send() - .with(eq(Event::PeerAdded { info_hash, peer })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - - event_sender_mock - .expect_send() - .with(eq(Event::PeerRemoved { info_hash, peer })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + expect_event_sequence( + &mut event_sender_mock, + vec![Event::PeerAdded { info_hash, peer }, Event::PeerRemoved { info_hash, peer }], + ); let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); @@ -993,17 +980,10 @@ mod tests { let mut event_sender_mock = MockEventSender::new(); - event_sender_mock - .expect_send() - .with(eq(Event::PeerAdded { info_hash, peer })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - - event_sender_mock - .expect_send() - .with(eq(Event::PeerRemoved { info_hash, peer })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + expect_event_sequence( + &mut event_sender_mock, + vec![Event::PeerAdded { info_hash, peer }, Event::PeerRemoved { info_hash, peer }], + ); let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); @@ -1024,21 +1004,17 @@ mod tests { let mut event_sender_mock = MockEventSender::new(); - event_sender_mock - .expect_send() - .with(eq(Event::PeerAdded { info_hash, peer })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - - event_sender_mock - .expect_send() - .with(eq(Event::PeerUpdated { - info_hash, - old_peer: peer, - new_peer: peer, - })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::PeerAdded { info_hash, peer }, + Event::PeerUpdated { + info_hash, + old_peer: peer, + new_peer: peer, + }, + ], + ); let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); @@ -1058,33 +1034,24 @@ mod tests { let mut event_sender_mock = MockEventSender::new(); - event_sender_mock - .expect_send() - .with(eq(Event::PeerAdded { - 
info_hash, - peer: started_peer, - })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - - event_sender_mock - .expect_send() - .with(eq(Event::PeerUpdated { - info_hash, - old_peer: started_peer, - new_peer: completed_peer, - })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - - event_sender_mock - .expect_send() - .with(eq(Event::PeerDownloadCompleted { - info_hash, - peer: completed_peer, - })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::PeerAdded { + info_hash, + peer: started_peer, + }, + Event::PeerUpdated { + info_hash, + old_peer: started_peer, + new_peer: completed_peer, + }, + Event::PeerDownloadCompleted { + info_hash, + peer: completed_peer, + }, + ], + ); let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); From b3b0b71396bebb0916a47f4833313b473260f59d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 15 May 2025 21:51:44 +0100 Subject: [PATCH 061/247] refactor: [#1358] Swarm, cleaning upsert_peer method --- packages/primitives/src/peer.rs | 5 + packages/torrent-repository/src/swarm.rs | 111 +++++++++++++---------- 2 files changed, 67 insertions(+), 49 deletions(-) diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index 20ddd3074..57ca3909d 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -270,6 +270,11 @@ impl Peer { !self.is_seeder() } + #[must_use] + pub fn is_completed(&self) -> bool { + self.event == AnnounceEvent::Completed + } + #[must_use] pub fn role(&self) -> PeerRole { if self.is_seeder() { diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 3277cad8d..d01f79fe8 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -73,21 +73,39 @@ impl Swarm { downloads_increased } - pub async fn upsert_peer( + async fn upsert_peer( 
&mut self, incoming_announce: Arc, downloads_increased: &mut bool, ) -> Option> { - let is_now_seeder = incoming_announce.is_seeder(); - let has_completed = incoming_announce.event == AnnounceEvent::Completed; let announcement = incoming_announce.clone(); - if let Some(old_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { - // A peer has been updated in the swarm. + if let Some(previous_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { + *downloads_increased = self.update_metadata(Some(&previous_announce), &announcement); - // Check if the peer has changed from leecher to seeder or vice versa. - if old_announce.is_seeder() != is_now_seeder { - if is_now_seeder { + self.trigger_peer_updated_event(&previous_announce, &announcement, *downloads_increased) + .await; + + Some(previous_announce) + } else { + *downloads_increased = self.update_metadata(None, &announcement); + + self.trigger_peer_added_event(&announcement).await; + + None + } + } + + fn update_metadata( + &mut self, + opt_previous_announce: Option<&Arc>, + new_announce: &Arc, + ) -> bool { + let mut downloads_increased = false; + + if let Some(previous_announce) = opt_previous_announce { + if previous_announce.role() != new_announce.role() { + if new_announce.is_seeder() { self.metadata.complete += 1; self.metadata.incomplete -= 1; } else { @@ -96,58 +114,53 @@ impl Swarm { } } - // Check if the peer has completed downloading the torrent. 
- if has_completed && old_announce.event != AnnounceEvent::Completed { + if new_announce.is_completed() && !previous_announce.is_completed() { self.metadata.downloaded += 1; - *downloads_increased = true; + downloads_increased = true; } - - if let Some(event_sender) = self.event_sender.as_deref() { - event_sender - .send(Event::PeerUpdated { - info_hash: self.info_hash, - old_peer: *old_announce, - new_peer: *announcement, - }) - .await; - - if *downloads_increased { - event_sender - .send(Event::PeerDownloadCompleted { - info_hash: self.info_hash, - peer: *announcement, - }) - .await; - } - } - - Some(old_announce) + } else if new_announce.is_seeder() { + self.metadata.complete += 1; } else { - // A new peer has been added to the swarm. - - // Check if the peer is a seeder or a leecher. - if is_now_seeder { - self.metadata.complete += 1; - } else { - self.metadata.incomplete += 1; - } + self.metadata.incomplete += 1; + } - // Check if the peer has completed downloading the torrent. - if has_completed { - // Don't increment `downloaded` here: we only count transitions - // from a known peer - } + downloads_increased + } - if let Some(event_sender) = self.event_sender.as_deref() { + async fn trigger_peer_updated_event( + &self, + old_announce: &Arc, + new_announce: &Arc, + downloads_increased: bool, + ) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerUpdated { + info_hash: self.info_hash, + old_peer: *old_announce.clone(), + new_peer: *new_announce.clone(), + }) + .await; + + if downloads_increased { event_sender - .send(Event::PeerAdded { + .send(Event::PeerDownloadCompleted { info_hash: self.info_hash, - peer: *announcement, + peer: *new_announce.clone(), }) .await; } + } + } - None + async fn trigger_peer_added_event(&self, announcement: &Arc) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerAdded { + info_hash: self.info_hash, + peer: *announcement.clone(), + }) + 
.await; } } From d154b2aa045063c807deb0a6a88fad55297e46b4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 May 2025 08:51:09 +0100 Subject: [PATCH 062/247] refactor: [#1358] clean Swarm type --- packages/torrent-repository/src/swarm.rs | 308 +++++++++++------------ 1 file changed, 148 insertions(+), 160 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index d01f79fe8..8cf2982e6 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -67,169 +67,20 @@ impl Swarm { AnnounceEvent::Started | AnnounceEvent::None | AnnounceEvent::Completed => { self.upsert_peer(Arc::new(*incoming_announce), &mut downloads_increased).await } - AnnounceEvent::Stopped => self.remove(incoming_announce).await, + AnnounceEvent::Stopped => self.remove_peer(&incoming_announce.peer_addr).await, }; downloads_increased } - async fn upsert_peer( - &mut self, - incoming_announce: Arc, - downloads_increased: &mut bool, - ) -> Option> { - let announcement = incoming_announce.clone(); - - if let Some(previous_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { - *downloads_increased = self.update_metadata(Some(&previous_announce), &announcement); - - self.trigger_peer_updated_event(&previous_announce, &announcement, *downloads_increased) - .await; - - Some(previous_announce) - } else { - *downloads_increased = self.update_metadata(None, &announcement); - - self.trigger_peer_added_event(&announcement).await; - - None - } - } - - fn update_metadata( - &mut self, - opt_previous_announce: Option<&Arc>, - new_announce: &Arc, - ) -> bool { - let mut downloads_increased = false; - - if let Some(previous_announce) = opt_previous_announce { - if previous_announce.role() != new_announce.role() { - if new_announce.is_seeder() { - self.metadata.complete += 1; - self.metadata.incomplete -= 1; - } else { - self.metadata.complete -= 1; - self.metadata.incomplete += 1; - } - } 
- - if new_announce.is_completed() && !previous_announce.is_completed() { - self.metadata.downloaded += 1; - downloads_increased = true; - } - } else if new_announce.is_seeder() { - self.metadata.complete += 1; - } else { - self.metadata.incomplete += 1; - } - - downloads_increased - } - - async fn trigger_peer_updated_event( - &self, - old_announce: &Arc, - new_announce: &Arc, - downloads_increased: bool, - ) { - if let Some(event_sender) = self.event_sender.as_deref() { - event_sender - .send(Event::PeerUpdated { - info_hash: self.info_hash, - old_peer: *old_announce.clone(), - new_peer: *new_announce.clone(), - }) - .await; - - if downloads_increased { - event_sender - .send(Event::PeerDownloadCompleted { - info_hash: self.info_hash, - peer: *new_announce.clone(), - }) - .await; - } - } - } - - async fn trigger_peer_added_event(&self, announcement: &Arc) { - if let Some(event_sender) = self.event_sender.as_deref() { - event_sender - .send(Event::PeerAdded { - info_hash: self.info_hash, - peer: *announcement.clone(), - }) - .await; - } - } - - pub async fn remove(&mut self, peer_to_remove: &Peer) -> Option> { - match self.peers.remove(&peer_to_remove.peer_addr) { - Some(old_peer) => { - // A peer has been removed from the swarm. - - // Check if the peer was a seeder or a leecher. - if old_peer.is_seeder() { - self.metadata.complete -= 1; - } else { - self.metadata.incomplete -= 1; - } - - if let Some(event_sender) = self.event_sender.as_deref() { - event_sender - .send(Event::PeerRemoved { - info_hash: self.info_hash, - peer: *old_peer.clone(), - }) - .await; - } - - Some(old_peer) - } - None => None, - } - } - pub async fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) -> usize { - let mut number_of_peers_removed = 0; - let mut removed_peers = Vec::new(); - - self.peers.retain(|_key, peer| { - let is_active = peer::ReadInfo::get_updated(peer) > current_cutoff; - - if !is_active { - // Update the metadata when removing a peer. 
- if peer.is_seeder() { - self.metadata.complete -= 1; - } else { - self.metadata.incomplete -= 1; - } - - number_of_peers_removed += 1; - - if let Some(_event_sender) = self.event_sender.as_deref() { - // Events can not be trigger here because retain does not allow - // async closures. - removed_peers.push(*peer.clone()); - } - } + let peers_to_remove = self.inactive_peers(current_cutoff); - is_active - }); - - if let Some(event_sender) = self.event_sender.as_deref() { - for peer in &removed_peers { - event_sender - .send(Event::PeerRemoved { - info_hash: self.info_hash, - peer: *peer, - }) - .await; - } + for peer_addr in &peers_to_remove { + self.remove_peer(peer_addr).await; } - number_of_peers_removed + peers_to_remove.len() } #[must_use] @@ -316,6 +167,57 @@ impl Swarm { !self.should_be_removed(policy) } + async fn upsert_peer( + &mut self, + incoming_announce: Arc, + downloads_increased: &mut bool, + ) -> Option> { + let announcement = incoming_announce.clone(); + + if let Some(previous_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { + *downloads_increased = self.update_metadata_on_update(&previous_announce, &announcement); + + self.trigger_peer_updated_event(&previous_announce, &announcement).await; + + if *downloads_increased { + self.trigger_peer_download_completed_event(&announcement).await; + } + + Some(previous_announce) + } else { + *downloads_increased = false; + + self.update_metadata_on_insert(&announcement); + + self.trigger_peer_added_event(&announcement).await; + + None + } + } + + async fn remove_peer(&mut self, peer_addr: &SocketAddr) -> Option> { + if let Some(old_peer) = self.peers.remove(peer_addr) { + self.update_metadata_on_removal(&old_peer); + + self.trigger_peer_removed_event(&old_peer).await; + + Some(old_peer) + } else { + None + } + } + + #[must_use] + fn inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Vec { + self.peers + .iter() + .filter(|(_, peer)| 
peer::ReadInfo::get_updated(&**peer) <= current_cutoff) + .map(|(addr, _)| *addr) + .collect() + } + + /// Returns true if the swarm should be removed according to the retention + /// policy. fn should_be_removed(&self, policy: &TrackerPolicy) -> bool { // If the policy is to remove peerless torrents and the swarm is empty (no peers), (policy.remove_peerless_torrents && self.is_empty()) @@ -325,6 +227,92 @@ impl Swarm { // See https://github.com/torrust/torrust-tracker/issues/1502) && !(policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0) } + + fn update_metadata_on_insert(&mut self, added_peer: &Arc) { + if added_peer.is_seeder() { + self.metadata.complete += 1; + } else { + self.metadata.incomplete += 1; + } + } + + fn update_metadata_on_removal(&mut self, removed_peer: &Arc) { + if removed_peer.is_seeder() { + self.metadata.complete -= 1; + } else { + self.metadata.incomplete -= 1; + } + } + + fn update_metadata_on_update( + &mut self, + previous_announce: &Arc, + new_announce: &Arc, + ) -> bool { + let mut downloads_increased = false; + + if previous_announce.role() != new_announce.role() { + if new_announce.is_seeder() { + self.metadata.complete += 1; + self.metadata.incomplete -= 1; + } else { + self.metadata.complete -= 1; + self.metadata.incomplete += 1; + } + } + + if new_announce.is_completed() && !previous_announce.is_completed() { + self.metadata.downloaded += 1; + downloads_increased = true; + } + + downloads_increased + } + + async fn trigger_peer_added_event(&self, announcement: &Arc) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerAdded { + info_hash: self.info_hash, + peer: *announcement.clone(), + }) + .await; + } + } + + async fn trigger_peer_removed_event(&self, old_peer: &Arc) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerRemoved { + info_hash: self.info_hash, + peer: *old_peer.clone(), + }) + .await; + } + } + + async fn 
trigger_peer_updated_event(&self, old_announce: &Arc, new_announce: &Arc) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerUpdated { + info_hash: self.info_hash, + old_peer: *old_announce.clone(), + new_peer: *new_announce.clone(), + }) + .await; + } + } + + async fn trigger_peer_download_completed_event(&self, new_announce: &Arc) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerDownloadCompleted { + info_hash: self.info_hash, + peer: *new_announce.clone(), + }) + .await; + } + } } #[cfg(test)] @@ -435,7 +423,7 @@ mod tests { swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - swarm.remove(&peer).await; + swarm.remove_peer(&peer.peer_addr).await; assert!(swarm.is_empty()); } @@ -449,7 +437,7 @@ mod tests { swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - let old = swarm.remove(&peer).await; + let old = swarm.remove_peer(&peer.peer_addr).await; assert_eq!(old, Some(Arc::new(peer))); assert_eq!(swarm.get(&peer.peer_addr), None); @@ -461,7 +449,7 @@ mod tests { let peer = PeerBuilder::default().build(); - assert_eq!(swarm.remove(&peer).await, None); + assert_eq!(swarm.remove_peer(&peer.peer_addr).await, None); } #[tokio::test] @@ -787,7 +775,7 @@ mod tests { let leechers = swarm.metadata().leechers(); - swarm.remove(&leecher).await; + swarm.remove_peer(&leecher.peer_addr).await; assert_eq!(swarm.metadata().leechers(), leechers - 1); } @@ -803,7 +791,7 @@ mod tests { let seeders = swarm.metadata().seeders(); - swarm.remove(&seeder).await; + swarm.remove_peer(&seeder.peer_addr).await; assert_eq!(swarm.metadata().seeders(), seeders - 1); } @@ -983,7 +971,7 @@ mod tests { let mut downloads_increased = false; swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - swarm.remove(&peer).await; + swarm.remove_peer(&peer.peer_addr).await; } #[tokio::test] From 52ac171063270edfb9b63549ae848157acd258da Mon Sep 17 00:00:00 2001 From: Jose Celano 
Date: Fri, 16 May 2025 10:12:18 +0100 Subject: [PATCH 063/247] chore(deps): update dependencies ```output cargo update Updating crates.io index Locking 32 packages to latest compatible versions Updating bitflags v2.9.0 -> v2.9.1 Updating cc v1.2.21 -> v1.2.22 Updating clap v4.5.37 -> v4.5.38 Updating clap_builder v4.5.37 -> v4.5.38 Updating errno v0.3.11 -> v0.3.12 Updating getrandom v0.3.2 -> v0.3.3 Updating icu_collections v1.5.0 -> v2.0.0 Adding icu_locale_core v2.0.0 Removing icu_locid v1.5.0 Removing icu_locid_transform v1.5.0 Removing icu_locid_transform_data v1.5.1 Updating icu_normalizer v1.5.0 -> v2.0.0 Updating icu_normalizer_data v1.5.1 -> v2.0.0 Updating icu_properties v1.5.1 -> v2.0.0 Updating icu_properties_data v1.5.1 -> v2.0.0 Updating icu_provider v1.5.0 -> v2.0.0 Removing icu_provider_macros v1.5.0 Updating idna_adapter v1.2.0 -> v1.2.1 Updating libloading v0.8.6 -> v0.8.7 Updating litemap v0.7.5 -> v0.8.0 Updating multimap v0.10.0 -> v0.10.1 Updating owo-colors v4.2.0 -> v4.2.1 Adding potential_utf v0.1.2 Updating rustls-webpki v0.103.2 -> v0.103.3 Updating tempfile v3.19.1 -> v3.20.0 Updating tinystr v0.7.6 -> v0.8.1 Updating tower-http v0.6.2 -> v0.6.4 Removing utf16_iter v1.0.5 Updating windows-core v0.61.0 -> v0.61.1 Updating windows-result v0.3.2 -> v0.3.3 Updating windows-strings v0.4.0 -> v0.4.1 Removing write16 v1.0.0 Updating writeable v0.5.5 -> v0.6.1 Updating yoke v0.7.5 -> v0.8.0 Updating yoke-derive v0.7.5 -> v0.8.0 Adding zerotrie v0.2.2 Updating zerovec v0.10.4 -> v0.11.2 Updating zerovec-derive v0.10.3 -> v0.11.1 ``` --- Cargo.lock | 236 ++++++++++++++++++++++++----------------------------- 1 file changed, 106 insertions(+), 130 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 75a272292..ab898e327 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -541,7 +541,7 @@ version = "0.71.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" 
dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "cexpr", "clang-sys", "itertools 0.13.0", @@ -567,9 +567,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" [[package]] name = "bittorrent-http-tracker-core" @@ -957,9 +957,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.21" +version = "1.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8691782945451c1c383942c4874dbe63814f61cb57ef773cda2972682b7bb3c0" +checksum = "32db95edf998450acc7881c932f94cd9b05c87b4b2599e8bab064753da4acfd1" dependencies = [ "jobserver", "libc", @@ -1050,9 +1050,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.37" +version = "4.5.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eccb054f56cbd38340b380d4a8e69ef1f02f1af43db2f0cc817a4774d80ae071" +checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000" dependencies = [ "clap_builder", "clap_derive", @@ -1060,9 +1060,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.37" +version = "4.5.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd9466fac8543255d3b1fcad4762c5e116ffe808c8a3043d4263cd4fd4862a2" +checksum = "379026ff283facf611b0ea629334361c4211d1b12ee01024eec1591133b04120" dependencies = [ "anstream", "anstyle", @@ -1457,9 +1457,9 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" +checksum = 
"cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" dependencies = [ "libc", "windows-sys 0.59.0", @@ -1825,9 +1825,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", "libc", @@ -2138,21 +2138,22 @@ dependencies = [ [[package]] name = "icu_collections" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", + "potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] -name = "icu_locid" -version = "1.5.0" +name = "icu_locale_core" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", @@ -2161,31 +2162,11 @@ dependencies = [ "zerovec", ] -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" - [[package]] name = "icu_normalizer" -version = "1.5.0" +version = "2.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ "displaydoc", "icu_collections", @@ -2193,67 +2174,54 @@ dependencies = [ "icu_properties", "icu_provider", "smallvec", - "utf16_iter", - "utf8_iter", - "write16", "zerovec", ] [[package]] name = "icu_normalizer_data" -version = "1.5.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "1.5.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +checksum = "2549ca8c7241c82f59c80ba2a6f415d931c5b58d24fb8412caa1a1f02c49139a" dependencies = [ "displaydoc", "icu_collections", - "icu_locid_transform", + "icu_locale_core", "icu_properties_data", "icu_provider", - "tinystr", + "potential_utf", + "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "1.5.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" +checksum = "8197e866e47b68f8f7d95249e172903bec06004b18b2937f1095d40a0c57de04" [[package]] name = "icu_provider" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", - "icu_locid", - "icu_provider_macros", + "icu_locale_core", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", + "zerotrie", "zerovec", ] -[[package]] 
-name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.101", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -2273,9 +2241,9 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ "icu_normalizer", "icu_properties", @@ -2386,7 +2354,7 @@ version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", "libc", ] @@ -2423,12 +2391,12 @@ checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" [[package]] name = "libloading" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +checksum = "6a793df0d7afeac54f95b471d3af7f0d4fb975699f972341a4b76988d49cdf0c" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.53.0", ] [[package]] @@ -2443,7 +2411,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "libc", "redox_syscall 0.5.12", ] @@ -2484,9 +2452,9 @@ checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" [[package]] name = "litemap" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "local-ip-address" @@ -2630,9 +2598,9 @@ dependencies = [ [[package]] name = "multimap" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" +checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" dependencies = [ "serde", ] @@ -2689,7 +2657,7 @@ dependencies = [ "base64 0.21.7", "bigdecimal", "bindgen", - "bitflags 2.9.0", + "bitflags 2.9.1", "bitvec", "btoi", "byteorder", @@ -2857,7 +2825,7 @@ version = "0.10.72" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "cfg-if", "foreign-types", "libc", @@ -2903,9 +2871,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "owo-colors" -version = "4.2.0" +version = "4.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1036865bb9422d3300cf723f657c2851d0e9ab12567854b1f4eba3d77decf564" +checksum = "26995317201fa17f3656c36716aed4a7c81743a9634ac4c99c0eeda495db0cec" [[package]] name = "parking" @@ -3125,6 +3093,15 @@ dependencies = [ "portable-atomic", ] +[[package]] +name = "potential_utf" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +dependencies = [ + "zerovec", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -3369,7 +3346,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", ] [[package]] @@ 
-3407,7 +3384,7 @@ version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] [[package]] @@ -3588,7 +3565,7 @@ version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a22715a5d6deef63c637207afbe68d0c72c3f8d0022d7cf9714c442d6157606b" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -3639,7 +3616,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "errno", "libc", "linux-raw-sys 0.4.15", @@ -3652,7 +3629,7 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "errno", "libc", "linux-raw-sys 0.9.4", @@ -3705,9 +3682,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.2" +version = "0.103.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7149975849f1abb3832b246010ef62ccc80d3a76169517ada7188252b9cfb437" +checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" dependencies = [ "ring", "rustls-pki-types", @@ -3777,7 +3754,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -3790,7 +3767,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = 
[ - "bitflags 2.9.0", + "bitflags 2.9.1", "core-foundation 0.10.0", "core-foundation-sys", "libc", @@ -4159,7 +4136,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -4199,12 +4176,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.19.1" +version = "3.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" dependencies = [ "fastrand", - "getrandom 0.3.2", + "getrandom 0.3.3", "once_cell", "rustix 1.0.7", "windows-sys 0.59.0", @@ -4357,9 +4334,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.7.6" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", "zerovec", @@ -4945,12 +4922,12 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.2" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" +checksum = "0fdb0c213ca27a9f57ab69ddb290fd80d970922355b83ae380b395d3986b8a2e" dependencies = [ "async-compression", - "bitflags 2.9.0", + "bitflags 2.9.1", "bytes", "futures-core", "http", @@ -5127,12 +5104,6 @@ dependencies = [ "serde", ] -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -5151,7 +5122,7 @@ version = 
"1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", "rand 0.9.1", ] @@ -5327,15 +5298,15 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.61.0" +version = "0.61.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980" +checksum = "46ec44dc15085cea82cf9c78f85a9114c463a369786585ad2882d1ff0b0acf40" dependencies = [ "windows-implement", "windows-interface", "windows-link", "windows-result", - "windows-strings 0.4.0", + "windows-strings 0.4.1", ] [[package]] @@ -5379,9 +5350,9 @@ dependencies = [ [[package]] name = "windows-result" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" +checksum = "4b895b5356fc36103d0f64dd1e94dfa7ac5633f1c9dd6e80fe9ec4adef69e09d" dependencies = [ "windows-link", ] @@ -5397,9 +5368,9 @@ dependencies = [ [[package]] name = "windows-strings" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ba9642430ee452d5a7aa78d72907ebe8cfda358e8cb7918a2050581322f97" +checksum = "2a7ab927b2637c19b3dbe0965e75d8f2d30bdd697a1516191cad2ec4df8fb28a" dependencies = [ "windows-link", ] @@ -5565,20 +5536,14 @@ version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - [[package]] name = 
"writeable" -version = "0.5.5" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "wyz" @@ -5607,9 +5572,9 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ "serde", "stable_deref_trait", @@ -5619,9 +5584,9 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", @@ -5697,11 +5662,22 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + [[package]] name = "zerovec" -version = "0.10.4" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" dependencies = [ "yoke", "zerofrom", @@ -5710,9 +5686,9 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.3" +version = "0.11.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", From 8d3b948ec3218e09f5187674866b969b5ef73af3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 May 2025 13:13:37 +0100 Subject: [PATCH 064/247] tests: [#1504] remove integration tests from torrent-repository pacakge All features are now covered by unit tests. --- .../torrent-repository/tests/common/mod.rs | 1 - .../tests/common/torrent_peer_builder.rs | 106 ---- .../torrent-repository/tests/integration.rs | 22 - .../torrent-repository/tests/swarm/mod.rs | 397 ------------- .../torrent-repository/tests/swarms/mod.rs | 524 ------------------ 5 files changed, 1050 deletions(-) delete mode 100644 packages/torrent-repository/tests/common/mod.rs delete mode 100644 packages/torrent-repository/tests/common/torrent_peer_builder.rs delete mode 100644 packages/torrent-repository/tests/integration.rs delete mode 100644 packages/torrent-repository/tests/swarm/mod.rs delete mode 100644 packages/torrent-repository/tests/swarms/mod.rs diff --git a/packages/torrent-repository/tests/common/mod.rs b/packages/torrent-repository/tests/common/mod.rs deleted file mode 100644 index c77ca2769..000000000 --- a/packages/torrent-repository/tests/common/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod torrent_peer_builder; diff --git a/packages/torrent-repository/tests/common/torrent_peer_builder.rs b/packages/torrent-repository/tests/common/torrent_peer_builder.rs deleted file mode 100644 index 0c065e670..000000000 --- a/packages/torrent-repository/tests/common/torrent_peer_builder.rs +++ /dev/null @@ -1,106 +0,0 @@ -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use torrust_tracker_clock::clock::Time; -use torrust_tracker_primitives::{peer, 
DurationSinceUnixEpoch}; - -use crate::CurrentClock; - -#[derive(Debug, Default)] -struct TorrentPeerBuilder { - peer: peer::Peer, -} - -#[allow(dead_code)] -impl TorrentPeerBuilder { - #[must_use] - fn new() -> Self { - Self { - peer: peer::Peer { - updated: CurrentClock::now(), - ..Default::default() - }, - } - } - - #[must_use] - fn with_event_completed(mut self) -> Self { - self.peer.event = AnnounceEvent::Completed; - self - } - - #[must_use] - fn with_event_started(mut self) -> Self { - self.peer.event = AnnounceEvent::Started; - self - } - - #[must_use] - fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - #[must_use] - fn with_peer_id(mut self, peer_id: PeerId) -> Self { - self.peer.peer_id = peer_id; - self - } - - #[must_use] - fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes::new(left); - self - } - - #[must_use] - fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self - } - - #[must_use] - fn into(self) -> peer::Peer { - self.peer - } -} - -/// A torrent seeder is a peer with 0 bytes left to download which -/// has not announced it has stopped -#[allow(clippy::cast_sign_loss)] -#[allow(clippy::cast_possible_truncation)] -#[must_use] -pub fn a_completed_peer(id: i32) -> peer::Peer { - let peer_id = peer::Id::new(id); - let peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), id as u16); - - TorrentPeerBuilder::new() - .with_number_of_bytes_left(0) - .with_event_completed() - .with_peer_id(*peer_id) - .with_peer_address(peer_addr) - .into() -} - -/// A torrent leecher is a peer that is not a seeder. -/// Leecher: left > 0 OR event = Stopped -/// -/// # Panics -/// -/// This function panics if proved id can't be converted into a valid socket address port. -/// -/// The `id` argument is used to identify the peer in both the `peer_id` and the `peer_addr`. 
-#[allow(clippy::cast_sign_loss)] -#[allow(clippy::cast_possible_truncation)] -#[must_use] -pub fn a_started_peer(id: i32) -> peer::Peer { - let peer_id = peer::Id::new(id); - let peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), id as u16); - - TorrentPeerBuilder::new() - .with_number_of_bytes_left(1) - .with_event_started() - .with_peer_id(*peer_id) - .with_peer_address(peer_addr) - .into() -} diff --git a/packages/torrent-repository/tests/integration.rs b/packages/torrent-repository/tests/integration.rs deleted file mode 100644 index b3e057075..000000000 --- a/packages/torrent-repository/tests/integration.rs +++ /dev/null @@ -1,22 +0,0 @@ -//! Integration tests. -//! -//! ```text -//! cargo test --test integration -//! ``` - -use torrust_tracker_clock::clock; - -pub mod common; -mod swarm; -mod swarms; - -/// This code needs to be copied into each crate. -/// Working version, for production. -#[cfg(not(test))] -#[allow(dead_code)] -pub(crate) type CurrentClock = clock::Working; - -/// Stopped version, for testing. 
-#[cfg(test)] -#[allow(dead_code)] -pub(crate) type CurrentClock = clock::Stopped; diff --git a/packages/torrent-repository/tests/swarm/mod.rs b/packages/torrent-repository/tests/swarm/mod.rs deleted file mode 100644 index cb4009ba9..000000000 --- a/packages/torrent-repository/tests/swarm/mod.rs +++ /dev/null @@ -1,397 +0,0 @@ -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::ops::Sub; -use std::time::Duration; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use bittorrent_primitives::info_hash::InfoHash; -use rstest::{fixture, rstest}; -use torrust_tracker_clock::clock::stopped::Stopped as _; -use torrust_tracker_clock::clock::{self, Time as _}; -use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; -use torrust_tracker_primitives::peer; -use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_torrent_repository::Swarm; - -use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; -use crate::CurrentClock; - -#[fixture] -fn swarm() -> Swarm { - Swarm::new(&InfoHash::default(), 0, None) -} - -#[fixture] -fn policy_none() -> TrackerPolicy { - TrackerPolicy::new(0, false, false) -} - -#[fixture] -fn policy_persist() -> TrackerPolicy { - TrackerPolicy::new(0, true, false) -} - -#[fixture] -fn policy_remove() -> TrackerPolicy { - TrackerPolicy::new(0, false, true) -} - -#[fixture] -fn policy_remove_persist() -> TrackerPolicy { - TrackerPolicy::new(0, true, true) -} - -pub enum Makes { - Empty, - Started, - Completed, - Downloaded, - Three, -} - -async fn make(swarm: &mut Swarm, makes: &Makes) -> Vec { - match makes { - Makes::Empty => vec![], - Makes::Started => { - let peer = a_started_peer(1); - swarm.handle_announcement(&peer).await; - vec![peer] - } - Makes::Completed => { - let peer = a_completed_peer(2); - swarm.handle_announcement(&peer).await; - vec![peer] - } - Makes::Downloaded => { - let mut peer = a_started_peer(3); - swarm.handle_announcement(&peer).await; - peer.event = 
AnnounceEvent::Completed; - peer.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer).await; - vec![peer] - } - Makes::Three => { - let peer_1 = a_started_peer(1); - swarm.handle_announcement(&peer_1).await; - - let peer_2 = a_completed_peer(2); - swarm.handle_announcement(&peer_2).await; - - let mut peer_3 = a_started_peer(3); - swarm.handle_announcement(&peer_3).await; - peer_3.event = AnnounceEvent::Completed; - peer_3.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer_3).await; - vec![peer_1, peer_2, peer_3] - } - } -} - -#[rstest] -#[case::empty(&Makes::Empty)] -#[tokio::test] -async fn it_should_be_empty_by_default(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes).await; - - assert_eq!(swarm.len(), 0); -} - -#[rstest] -#[case::empty(&Makes::Empty)] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy( - #[values(swarm())] mut swarm: Swarm, - #[case] makes: &Makes, - #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, -) { - make(&mut swarm, makes).await; - - let has_peers = !swarm.is_empty(); - let has_downloads = swarm.metadata().downloaded != 0; - - match (policy.remove_peerless_torrents, policy.persistent_torrent_completed_stat) { - // remove torrents without peers, and keep completed download stats - (true, true) => match (has_peers, has_downloads) { - // no peers, but has downloads - // peers, with or without downloads - (false, true) | (true, true | false) => assert!(swarm.meets_retaining_policy(&policy)), - // no peers and no downloads - (false, false) => assert!(!swarm.meets_retaining_policy(&policy)), - }, - // remove torrents without peers and drop completed download stats - (true, false) => match (has_peers, has_downloads) { - // peers, with or 
without downloads - (true, true | false) => assert!(swarm.meets_retaining_policy(&policy)), - // no peers and with or without downloads - (false, true | false) => assert!(!swarm.meets_retaining_policy(&policy)), - }, - // keep torrents without peers, but keep or drop completed download stats - (false, true | false) => assert!(swarm.meets_retaining_policy(&policy)), - } -} - -#[rstest] -#[case::empty(&Makes::Empty)] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_get_peers_for_torrent_entry(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes).await; - - let torrent_peers = swarm.peers(None); - - assert_eq!(torrent_peers.len(), peers.len()); - - for peer in torrent_peers { - assert!(peers.contains(&peer)); - } -} - -#[rstest] -#[case::empty(&Makes::Empty)] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_update_a_peer(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes).await; - - // Make and insert a new peer. - let mut peer = a_started_peer(-1); - swarm.handle_announcement(&peer).await; - - // Get the Inserted Peer by Id. - let peers = swarm.peers(None); - let original = peers - .iter() - .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) - .expect("it should find peer by id"); - - assert_eq!(original.event, AnnounceEvent::Started, "it should be as created"); - - // Announce "Completed" torrent download event. - peer.event = AnnounceEvent::Completed; - swarm.handle_announcement(&peer).await; - - // Get the Updated Peer by Id. 
- let peers = swarm.peers(None); - let updated = peers - .iter() - .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) - .expect("it should find peer by id"); - - assert_eq!(updated.event, AnnounceEvent::Completed, "it should be updated"); -} - -#[rstest] -#[case::empty(&Makes::Empty)] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - use torrust_tracker_primitives::peer::ReadInfo as _; - - make(&mut swarm, makes).await; - - let mut peer = a_started_peer(-1); - - swarm.handle_announcement(&peer).await; - - // The started peer should be inserted. - let peers = swarm.peers(None); - let original = peers - .iter() - .find(|p| p.get_id() == peer.get_id()) - .expect("it should find peer by id"); - - assert_eq!(original.event, AnnounceEvent::Started); - - // Change peer to "Stopped" and insert. - peer.event = AnnounceEvent::Stopped; - swarm.handle_announcement(&peer).await; - - // It should be removed now. - let peers = swarm.peers(None); - - assert_eq!( - peers.iter().find(|p| p.get_id() == peer.get_id()), - None, - "it should be removed" - ); -} - -#[rstest] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic( - #[values(swarm())] mut torrent: Swarm, - #[case] makes: &Makes, -) { - make(&mut torrent, makes).await; - let downloaded = torrent.metadata().downloaded; - - let peers = torrent.peers(None); - let mut peer = **peers.first().expect("there should be a peer"); - - let is_already_completed = peer.event == AnnounceEvent::Completed; - - // Announce "Completed" torrent download event. 
- peer.event = AnnounceEvent::Completed; - - torrent.handle_announcement(&peer).await; - let stats = torrent.metadata(); - - if is_already_completed { - assert_eq!(stats.downloaded, downloaded); - } else { - assert_eq!(stats.downloaded, downloaded + 1); - } -} - -#[rstest] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_update_a_peer_as_a_seeder(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes).await; - let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); - - let peers = swarm.peers(None); - let mut peer = **peers.first().expect("there should be a peer"); - - let is_already_non_left = peer.left == NumberOfBytes::new(0); - - // Set Bytes Left to Zero - peer.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer).await; - let stats = swarm.metadata(); - - if is_already_non_left { - // it was already complete - assert_eq!(stats.complete, completed); - } else { - // now it is complete - assert_eq!(stats.complete, completed + 1); - } -} - -#[rstest] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_update_a_peer_as_incomplete(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes).await; - let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); - - let peers = swarm.peers(None); - let mut peer = **peers.first().expect("there should be a peer"); - - let completed_already = peer.left == NumberOfBytes::new(0); - - // Set Bytes Left to no Zero - peer.left = NumberOfBytes::new(1); - swarm.handle_announcement(&peer).await; - let stats = swarm.metadata(); - - if completed_already { - // now it is 
incomplete - assert_eq!(stats.incomplete, incomplete + 1); - } else { - // was already incomplete - assert_eq!(stats.incomplete, incomplete); - } -} - -#[rstest] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_get_peers_excluding_the_client_socket(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes).await; - - let peers = swarm.peers(None); - let mut peer = **peers.first().expect("there should be a peer"); - - let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); - - // for this test, we should not already use this socket. - assert_ne!(peer.peer_addr, socket); - - // it should get the peer as it dose not share the socket. - assert!(swarm.peers_excluding(&socket, None).contains(&peer.into())); - - // set the address to the socket. - peer.peer_addr = socket; - swarm.handle_announcement(&peer).await; // Add peer - - // It should not include the peer that has the same socket. 
- assert!(!swarm.peers_excluding(&socket, None).contains(&peer.into())); -} - -#[rstest] -#[case::empty(&Makes::Empty)] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_limit_the_number_of_peers_returned(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes).await; - - // We add one more peer than the scrape limit - for peer_number in 1..=74 + 1 { - let peer = a_started_peer(peer_number); - swarm.handle_announcement(&peer).await; - } - - let peers = swarm.peers(Some(TORRENT_PEERS_LIMIT)); - - assert_eq!(peers.len(), 74); -} - -#[rstest] -#[case::empty(&Makes::Empty)] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - const TIMEOUT: Duration = Duration::from_secs(120); - const EXPIRE: Duration = Duration::from_secs(121); - - let peers = make(&mut swarm, makes).await; - - let mut peer = a_completed_peer(-1); - - let now = clock::Working::now(); - clock::Stopped::local_set(&now); - - peer.updated = now.sub(EXPIRE); - - swarm.handle_announcement(&peer).await; - - assert_eq!(swarm.len(), peers.len() + 1); - - let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); - swarm.remove_inactive(current_cutoff).await; - - assert_eq!(swarm.len(), peers.len()); -} diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs deleted file mode 100644 index 780d6cd4c..000000000 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ /dev/null @@ -1,524 +0,0 @@ -use std::collections::{BTreeMap, HashSet}; -use std::hash::{DefaultHasher, Hash, Hasher}; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use 
bittorrent_primitives::info_hash::InfoHash; -use futures::future::join_all; -use rstest::{fixture, rstest}; -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::PersistentTorrents; -use torrust_tracker_torrent_repository::swarm::Swarm; -use torrust_tracker_torrent_repository::Swarms; - -use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; - -fn swarm() -> Swarm { - Swarm::new(&InfoHash::default(), 0, None) -} - -#[fixture] -fn swarms() -> Swarms { - Swarms::default() -} - -type Entries = Vec<(InfoHash, Swarm)>; - -#[fixture] -fn empty() -> Entries { - vec![] -} - -#[fixture] -fn default() -> Entries { - vec![(InfoHash::default(), swarm())] -} - -#[fixture] -async fn started() -> Entries { - let mut swarm = swarm(); - swarm.handle_announcement(&a_started_peer(1)).await; - vec![(InfoHash::default(), swarm)] -} - -#[fixture] -async fn completed() -> Entries { - let mut swarm = swarm(); - swarm.handle_announcement(&a_completed_peer(2)).await; - vec![(InfoHash::default(), swarm)] -} - -#[fixture] -async fn downloaded() -> Entries { - let mut swarm = swarm(); - let mut peer = a_started_peer(3); - swarm.handle_announcement(&peer).await; - peer.event = AnnounceEvent::Completed; - peer.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer).await; - vec![(InfoHash::default(), swarm)] -} - -#[fixture] -async fn three() -> Entries { - let mut started = swarm(); - let started_h = &mut DefaultHasher::default(); - started.handle_announcement(&a_started_peer(1)).await; - started.hash(started_h); - - let mut completed = swarm(); - let completed_h = &mut DefaultHasher::default(); - completed.handle_announcement(&a_completed_peer(2)).await; - completed.hash(completed_h); - - let mut downloaded = swarm(); - let downloaded_h = &mut DefaultHasher::default(); - let mut downloaded_peer = a_started_peer(3); 
- downloaded.handle_announcement(&downloaded_peer).await; - downloaded_peer.event = AnnounceEvent::Completed; - downloaded_peer.left = NumberOfBytes::new(0); - downloaded.handle_announcement(&downloaded_peer).await; - downloaded.hash(downloaded_h); - - vec![ - (InfoHash::from(&started_h.clone()), started), - (InfoHash::from(&completed_h.clone()), completed), - (InfoHash::from(&downloaded_h.clone()), downloaded), - ] -} - -#[fixture] -async fn many_out_of_order() -> Entries { - let mut entries: HashSet<(InfoHash, Swarm)> = HashSet::default(); - - for i in 0..408 { - let mut entry = swarm(); - entry.handle_announcement(&a_started_peer(i)).await; - - entries.insert((InfoHash::from(&i), entry)); - } - - // we keep the random order from the hashed set for the vector. - entries.iter().map(|(i, e)| (*i, e.clone())).collect() -} - -#[fixture] -async fn many_hashed_in_order() -> Entries { - let mut entries: BTreeMap = BTreeMap::default(); - - for i in 0..408 { - let mut entry = swarm(); - entry.handle_announcement(&a_started_peer(i)).await; - - let hash: &mut DefaultHasher = &mut DefaultHasher::default(); - hash.write_i32(i); - - entries.insert(InfoHash::from(&hash.clone()), entry); - } - - // We return the entries in-order from from the b-tree map. 
- entries.iter().map(|(i, e)| (*i, e.clone())).collect() -} - -#[fixture] -fn persistent_empty() -> PersistentTorrents { - PersistentTorrents::default() -} - -#[fixture] -fn persistent_single() -> PersistentTorrents { - let hash = &mut DefaultHasher::default(); - - hash.write_u8(1); - let t = [(InfoHash::from(&hash.clone()), 0_u32)]; - - t.iter().copied().collect() -} - -#[fixture] -fn persistent_three() -> PersistentTorrents { - let hash = &mut DefaultHasher::default(); - - hash.write_u8(1); - let info_1 = InfoHash::from(&hash.clone()); - hash.write_u8(2); - let info_2 = InfoHash::from(&hash.clone()); - hash.write_u8(3); - let info_3 = InfoHash::from(&hash.clone()); - - let t = [(info_1, 1_u32), (info_2, 2_u32), (info_3, 3_u32)]; - - t.iter().copied().collect() -} - -fn make(swarms: &Swarms, entries: &Entries) { - for (info_hash, swarm) in entries { - swarms.insert(info_hash, swarm.clone()); - } -} - -#[fixture] -fn paginated_limit_zero() -> Pagination { - Pagination::new(0, 0) -} - -#[fixture] -fn paginated_limit_one() -> Pagination { - Pagination::new(0, 1) -} - -#[fixture] -fn paginated_limit_one_offset_one() -> Pagination { - Pagination::new(1, 1) -} - -#[fixture] -fn policy_none() -> TrackerPolicy { - TrackerPolicy::new(0, false, false) -} - -#[fixture] -fn policy_persist() -> TrackerPolicy { - TrackerPolicy::new(0, true, false) -} - -#[fixture] -fn policy_remove() -> TrackerPolicy { - TrackerPolicy::new(0, false, true) -} - -#[fixture] -fn policy_remove_persist() -> TrackerPolicy { - TrackerPolicy::new(0, true, true) -} - -#[rstest] -#[case::empty(empty())] -#[case::default(default())] -#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_get_a_torrent_entry(#[values(swarms())] repo: Swarms, #[case] entries: Entries) { - make(&repo, 
&entries); - - if let Some((info_hash, swarm)) = entries.first() { - assert_eq!(Some(repo.get(info_hash).unwrap().lock().await.clone()), Some(swarm.clone())); - } else { - assert!(repo.get(&InfoHash::default()).is_none()); - } -} - -#[rstest] -#[case::empty(empty())] -#[case::default(default())] -#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( - #[values(swarms())] repo: Swarms, - #[case] entries: Entries, - #[future] many_out_of_order: Entries, -) { - make(&repo, &entries); - - let entries_a = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); - - make(&repo, &many_out_of_order.await); - - let entries_b = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); - - let is_equal = entries_b.iter().take(entries_a.len()).copied().collect::>() == entries_a; - - let is_sorted = entries_b.windows(2).all(|w| w[0] <= w[1]); - - assert!( - is_equal || is_sorted, - "The order is unstable: {is_equal}, or is sorted {is_sorted}." - ); -} - -#[rstest] -#[case::empty(empty())] -#[case::default(default())] -#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_get_paginated( - #[values(swarms())] repo: Swarms, - #[case] entries: Entries, - #[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination, -) { - make(&repo, &entries); - - let mut info_hashes = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); - info_hashes.sort(); - - match paginated { - // it should return empty if limit is zero. 
- Pagination { limit: 0, .. } => { - let page = repo.get_paginated(Some(&paginated)); - - let futures = page.iter().map(|(i, swarm_handle)| { - let i = *i; - let swarm_handle = swarm_handle.clone(); - async move { (i, swarm_handle.lock().await.clone()) } - }); - - let swarms: Vec<(InfoHash, Swarm)> = join_all(futures).await; - - assert_eq!(swarms, vec![]); - } - - // it should return a single entry if the limit is one. - Pagination { limit: 1, offset: 0 } => { - if info_hashes.is_empty() { - assert_eq!(repo.get_paginated(Some(&paginated)).len(), 0); - } else { - let page = repo.get_paginated(Some(&paginated)); - assert_eq!(page.len(), 1); - assert_eq!(page.first().map(|(i, _)| i), info_hashes.first()); - } - } - - // it should return only the second entry if both the limit and the offset are one. - Pagination { limit: 1, offset: 1 } => { - if info_hashes.len() > 1 { - let page = repo.get_paginated(Some(&paginated)); - assert_eq!(page.len(), 1); - assert_eq!(page[0].0, info_hashes[1]); - } - } - - _ => {} - } -} - -#[rstest] -#[case::empty(empty())] -#[case::default(default())] -#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_get_metrics(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - - make(&swarms, &entries); - - let mut metrics = AggregateSwarmMetadata::default(); - - for (_, torrent) in entries { - let stats = torrent.metadata(); - - metrics.total_torrents += 1; - metrics.total_incomplete += u64::from(stats.incomplete); - metrics.total_complete += u64::from(stats.complete); - metrics.total_downloaded += u64::from(stats.downloaded); - } - - assert_eq!(swarms.get_aggregate_swarm_metadata().await.unwrap(), metrics); -} - -#[rstest] 
-#[case::empty(empty())] -#[case::default(default())] -#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_import_persistent_torrents( - #[values(swarms())] swarms: Swarms, - #[case] entries: Entries, - #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, -) { - make(&swarms, &entries); - - let mut downloaded = swarms.get_aggregate_swarm_metadata().await.unwrap().total_downloaded; - persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); - - swarms.import_persistent(&persistent_torrents); - - assert_eq!( - swarms.get_aggregate_swarm_metadata().await.unwrap().total_downloaded, - downloaded - ); - - for (entry, _) in persistent_torrents { - assert!(swarms.get(&entry).is_some()); - } -} - -#[rstest] -#[case::empty(empty())] -#[case::default(default())] -#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_remove_an_entry(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { - make(&swarms, &entries); - - for (info_hash, torrent) in entries { - assert_eq!( - Some(swarms.get(&info_hash).unwrap().lock().await.clone()), - Some(torrent.clone()) - ); - assert_eq!( - Some(swarms.remove(&info_hash).await.unwrap().lock().await.clone()), - Some(torrent) - ); - - assert!(swarms.get(&info_hash).is_none()); - assert!(swarms.remove(&info_hash).await.is_none()); - } - - assert_eq!(swarms.get_aggregate_swarm_metadata().await.unwrap().total_torrents, 0); -} - -#[rstest] -#[case::empty(empty())] -#[case::default(default())] 
-#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { - use std::ops::Sub as _; - use std::time::Duration; - - use torrust_tracker_clock::clock::stopped::Stopped as _; - use torrust_tracker_clock::clock::{self, Time as _}; - use torrust_tracker_primitives::peer; - - use crate::CurrentClock; - - const TIMEOUT: Duration = Duration::from_secs(120); - const EXPIRE: Duration = Duration::from_secs(121); - - make(&swarms, &entries); - - let info_hash: InfoHash; - let mut peer: peer::Peer; - - // Generate a new infohash and peer. - { - let hash = &mut DefaultHasher::default(); - hash.write_u8(255); - info_hash = InfoHash::from(&hash.clone()); - peer = a_completed_peer(-1); - } - - // Set the last updated time of the peer to be 121 seconds ago. - { - let now = clock::Working::now(); - clock::Stopped::local_set(&now); - - peer.updated = now.sub(EXPIRE); - } - - // Insert the infohash and peer into the repository - // and verify there is an extra torrent entry. - { - swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); - assert_eq!( - swarms.get_aggregate_swarm_metadata().await.unwrap().total_torrents, - entries.len() as u64 + 1 - ); - } - - // Insert the infohash and peer into the repository - // and verify the swarm metadata was updated. - { - swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); - let stats = swarms.get_swarm_metadata(&info_hash).await.unwrap(); - assert_eq!( - stats, - Some(SwarmMetadata { - downloaded: 0, - complete: 1, - incomplete: 0 - }) - ); - } - - // Verify that this new peer was inserted into the repository. 
- { - let lock_tracked_torrent = swarms.get(&info_hash).expect("it_should_get_some"); - let entry = lock_tracked_torrent.lock().await; - assert!(entry.peers(None).contains(&peer.into())); - } - - // Remove peers that have not been updated since the timeout (120 seconds ago). - { - swarms - .remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")) - .await - .unwrap(); - } - - // Verify that the this peer was removed from the repository. - { - let lock_tracked_torrent = swarms.get(&info_hash).expect("it_should_get_some"); - let entry = lock_tracked_torrent.lock().await; - assert!(!entry.peers(None).contains(&peer.into())); - } -} - -#[rstest] -#[case::empty(empty())] -#[case::default(default())] -#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_remove_peerless_torrents( - #[values(swarms())] swarms: Swarms, - #[case] entries: Entries, - #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, -) { - make(&swarms, &entries); - - swarms.remove_peerless_torrents(&policy).await.unwrap(); - - let paginated = swarms.get_paginated(None); // ← store the result in a named variable - - let futures = paginated.iter().map(|(i, swarm_handle)| { - let i = *i; - let swarm_handle = swarm_handle.clone(); - async move { (i, swarm_handle.lock().await.clone()) } - }); - - let torrents: Vec<(InfoHash, Swarm)> = join_all(futures).await; - - for (_, entry) in torrents { - assert!(entry.meets_retaining_policy(&policy)); - } -} From c2dabb2fcc5a4bddacfc21c68f0e626a547c82af Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 May 2025 13:15:24 +0100 Subject: [PATCH 065/247] chore: [#1504] remove uneeded fn attribute --- packages/torrent-repository/src/swarms.rs | 1 - 1 file 
changed, 1 deletion(-) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 8b8327778..ac2490853 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -49,7 +49,6 @@ impl Swarms { /// # Errors /// /// This function panics if the lock for the swarm handle cannot be acquired. - #[allow(clippy::await_holding_lock)] pub async fn handle_announcement( &self, info_hash: &InfoHash, From 1472c8e99ac145ec03140f719e08786e750892ca Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 May 2025 13:17:54 +0100 Subject: [PATCH 066/247] refactor: [#1504] remove unneded trait implementationis for Swarm --- packages/torrent-repository/src/swarm.rs | 37 ------------------------ 1 file changed, 37 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 8cf2982e6..f25304979 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -1,8 +1,6 @@ //! A swarm is a collection of peers that are all trying to download the same //! torrent. 
use std::collections::BTreeMap; -use std::fmt::Debug; -use std::hash::{Hash, Hasher}; use std::net::SocketAddr; use std::sync::Arc; @@ -24,31 +22,6 @@ pub struct Swarm { event_sender: Sender, } -#[allow(clippy::missing_fields_in_debug)] -impl Debug for Swarm { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Swarm") - .field("peers", &self.peers) - .field("metadata", &self.metadata) - .finish() - } -} - -impl Hash for Swarm { - fn hash(&self, state: &mut H) { - self.peers.hash(state); - self.metadata.hash(state); - } -} - -impl PartialEq for Swarm { - fn eq(&self, other: &Self) -> bool { - self.peers == other.peers && self.metadata == other.metadata - } -} - -impl Eq for Swarm {} - impl Swarm { #[must_use] pub fn new(info_hash: &InfoHash, downloaded: u32, event_sender: Sender) -> Self { @@ -329,16 +302,6 @@ mod tests { use crate::swarm::Swarm; use crate::tests::sample_info_hash; - #[test] - fn it_should_allow_debugging() { - let swarm = Swarm::new(&sample_info_hash(), 0, None); - - assert_eq!( - format!("{swarm:?}"), - "Swarm { peers: {}, metadata: SwarmMetadata { downloaded: 0, complete: 0, incomplete: 0 } }" - ); - } - #[test] fn it_should_be_empty_when_no_peers_have_been_inserted() { let swarm = Swarm::new(&sample_info_hash(), 0, None); From 85d9d3562bfaca3295f0cf2c3e879e061e7169ac Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 19 May 2025 10:48:31 +0100 Subject: [PATCH 067/247] refactor: [#1493] remove duplicate code for Peer buidler --- .../tests/server/v1/contract.rs | 14 ++-- packages/primitives/src/peer.rs | 33 ++++++-- .../tests/common/torrent_peer_builder.rs | 80 ++----------------- .../src/handlers/announce.rs | 27 ++++--- .../udp-tracker-server/src/handlers/mod.rs | 51 +----------- .../udp-tracker-server/src/handlers/scrape.rs | 9 ++- 6 files changed, 61 insertions(+), 153 deletions(-) diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs 
b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index d864ba67c..d9ac2e1e1 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -1012,7 +1012,7 @@ mod for_all_config_modes { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), ) .await; @@ -1053,7 +1053,7 @@ mod for_all_config_modes { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_no_bytes_pending_to_download() + .with_no_bytes_left_to_download() .build(), ) .await; @@ -1286,7 +1286,7 @@ mod configured_as_whitelisted { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), ) .await; @@ -1323,7 +1323,7 @@ mod configured_as_whitelisted { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), ) .await; @@ -1500,7 +1500,7 @@ mod configured_as_private { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), ) .await; @@ -1532,7 +1532,7 @@ mod configured_as_private { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), ) .await; @@ -1584,7 +1584,7 @@ mod configured_as_private { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), ) .await; diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index 57ca3909d..c271ee5d6 100644 --- a/packages/primitives/src/peer.rs +++ 
b/packages/primitives/src/peer.rs @@ -558,21 +558,30 @@ pub mod fixture { self } - #[allow(dead_code)] #[must_use] - pub fn with_bytes_pending_to_download(mut self, left: i64) -> Self { + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + #[must_use] + pub fn updated_on(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + #[must_use] + pub fn with_bytes_left_to_download(mut self, left: i64) -> Self { self.peer.left = NumberOfBytes::new(left); self } - #[allow(dead_code)] #[must_use] - pub fn with_no_bytes_pending_to_download(mut self) -> Self { + pub fn with_no_bytes_left_to_download(mut self) -> Self { self.peer.left = NumberOfBytes::new(0); self } - #[allow(dead_code)] #[must_use] pub fn last_updated_on(mut self, updated: DurationSinceUnixEpoch) -> Self { self.peer.updated = updated; @@ -585,13 +594,23 @@ pub mod fixture { self } - #[allow(dead_code)] + #[must_use] + pub fn with_event_started(mut self) -> Self { + self.peer.event = AnnounceEvent::Started; + self + } + + #[must_use] + pub fn with_event_completed(mut self) -> Self { + self.peer.event = AnnounceEvent::Completed; + self + } + #[must_use] pub fn build(self) -> Peer { self.into() } - #[allow(dead_code)] #[must_use] pub fn into(self) -> Peer { self.peer diff --git a/packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs b/packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs index 33120180d..48aa981cd 100644 --- a/packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs +++ b/packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs @@ -1,79 +1,15 @@ -use std::net::SocketAddr; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use torrust_tracker_clock::clock::Time; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; - -use crate::CurrentClock; - -#[derive(Debug, Default)] 
-struct TorrentPeerBuilder { - peer: peer::Peer, -} - -#[allow(dead_code)] -impl TorrentPeerBuilder { - #[must_use] - fn new() -> Self { - Self { - peer: peer::Peer { - updated: CurrentClock::now(), - ..Default::default() - }, - } - } - - #[must_use] - fn with_event_completed(mut self) -> Self { - self.peer.event = AnnounceEvent::Completed; - self - } - - #[must_use] - fn with_event_started(mut self) -> Self { - self.peer.event = AnnounceEvent::Started; - self - } - - #[must_use] - fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - #[must_use] - fn with_peer_id(mut self, peer_id: PeerId) -> Self { - self.peer.peer_id = peer_id; - self - } - - #[must_use] - fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes::new(left); - self - } - - #[must_use] - fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self - } - - #[must_use] - fn into(self) -> peer::Peer { - self.peer - } -} +use torrust_tracker_primitives::peer::fixture::PeerBuilder; +use torrust_tracker_primitives::peer::{self}; /// A torrent seeder is a peer with 0 bytes left to download which /// has not announced it has stopped #[must_use] pub fn a_completed_peer(id: i32) -> peer::Peer { let peer_id = peer::Id::new(id); - TorrentPeerBuilder::new() - .with_number_of_bytes_left(0) + PeerBuilder::default() + .with_bytes_left_to_download(0) .with_event_completed() - .with_peer_id(*peer_id) + .with_peer_id(&peer_id) .into() } @@ -82,9 +18,9 @@ pub fn a_completed_peer(id: i32) -> peer::Peer { #[must_use] pub fn a_started_peer(id: i32) -> peer::Peer { let peer_id = peer::Id::new(id); - TorrentPeerBuilder::new() - .with_number_of_bytes_left(1) + PeerBuilder::default() + .with_bytes_left_to_download(1) .with_event_started() - .with_peer_id(*peer_id) + .with_peer_id(&peer_id) .into() } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs 
b/packages/udp-tracker-server/src/handlers/announce.rs index 65b521f27..567f43740 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -207,6 +207,7 @@ mod tests { use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use mockall::predicate::eq; use torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event, UdpRequestKind}; @@ -216,7 +217,6 @@ mod tests { initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, MockUdpServerStatsEventSender, - TorrentPeerBuilder, }; #[tokio::test] @@ -258,8 +258,8 @@ mod tests { .get_torrent_peers(&info_hash.0.into()) .await; - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let expected_peer = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip), client_port)) .updated_on(peers[0].updated) .into(); @@ -364,8 +364,8 @@ mod tests { let client_port = 8080; let peer_id = AquaticPeerId([255u8; 20]); - let peer_using_ipv6 = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let peer_using_ipv6 = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); @@ -466,13 +466,13 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use 
crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::handlers::handle_announce; use crate::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_issue_time, - TorrentPeerBuilder, }; #[tokio::test] @@ -516,8 +516,8 @@ mod tests { let external_ip_in_tracker_configuration = core_tracker_services.core_config.net.external_ip.unwrap(); - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let expected_peer = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) .updated_on(peers[0].updated) .into(); @@ -547,6 +547,7 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_configuration::Core; use torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event, UdpRequestKind}; @@ -555,7 +556,7 @@ mod tests { use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, - sample_issue_time, MockUdpServerStatsEventSender, TorrentPeerBuilder, + sample_issue_time, MockUdpServerStatsEventSender, }; #[tokio::test] @@ -598,8 +599,8 @@ mod tests { .get_torrent_peers(&info_hash.0.into()) .await; - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let expected_peer = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .updated_on(peers[0].updated) .into(); @@ -707,8 +708,8 @@ mod tests { let client_port = 8080; let peer_id = AquaticPeerId([255u8; 20]); - let peer_using_ipv4 = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let peer_using_ipv4 = PeerBuilder::default() + .with_peer_id(&peer_id) 
.with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index ca834c006..831073333 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -208,7 +208,6 @@ pub(crate) mod tests { use std::ops::Range; use std::sync::Arc; - use aquatic_udp_protocol::{NumberOfBytes, PeerId}; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; @@ -225,14 +224,12 @@ pub(crate) mod tests { use bittorrent_udp_tracker_core::{self, event as core_event}; use futures::future::BoxFuture; use mockall::mock; - use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_events::sender::SendError; - use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::{event as server_event, CurrentClock}; + use crate::event as server_event; pub(crate) struct CoreTrackerServices { pub core_config: Arc, @@ -360,52 +357,6 @@ pub(crate) mod tests { sample_issue_time() - 10.0..sample_issue_time() + 10.0 } - #[derive(Debug, Default)] - pub(crate) struct TorrentPeerBuilder { - peer: peer::Peer, - } - - impl TorrentPeerBuilder { - #[must_use] - pub fn new() -> Self { - Self { - peer: peer::Peer { - updated: CurrentClock::now(), - ..Default::default() - }, - } - } - - #[must_use] - pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - #[must_use] - pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { - self.peer.peer_id = peer_id; - self - } - - #[must_use] - pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = 
NumberOfBytes::new(left); - self - } - - #[must_use] - pub fn updated_on(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self - } - - #[must_use] - pub fn into(self) -> peer::Peer { - self.peer - } - } - pub(crate) struct TrackerConfigurationBuilder { configuration: Configuration, } diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index e35e118b4..a9462e0f9 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -93,6 +93,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::bus::EventBus; @@ -100,7 +101,7 @@ mod tests { use crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, - sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, TorrentPeerBuilder, + sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, }; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { @@ -158,10 +159,10 @@ mod tests { ) { let peer_id = PeerId([255u8; 20]); - let peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let peer = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(*remote_addr) - .with_number_of_bytes_left(0) + .with_bytes_left_to_download(0) .into(); let _number_of_downloads_increased = in_memory_torrent_repository From b11af88ee3981faa92d26f64b6216d56ec1ff473 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 19 May 2025 17:51:21 +0100 Subject: [PATCH 068/247] feat: [#1522] add events metrics in 
torrent-repository These new metrics just count the number of times events have ocurred. --- .../src/statistics/event/handler.rs | 206 ++++++++++++++++-- .../torrent-repository/src/statistics/mod.rs | 41 +++- 2 files changed, 229 insertions(+), 18 deletions(-) diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index 2b61839b8..f8d350a80 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -8,7 +8,9 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; use crate::statistics::{ - TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL, TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, + TORRENT_REPOSITORY_PEERS_ADDED_TOTAL, TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL, TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL, + TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL, TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL, + TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL, TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, }; pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { @@ -20,6 +22,14 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: let _unused = stats_repository .increment_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) .await; + + let _unused = stats_repository + .increment_counter( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL), + &LabelSet::default(), + now, + ) + .await; } Event::TorrentRemoved { info_hash } => { tracing::debug!(info_hash = ?info_hash, "Torrent removed",); @@ -27,29 +37,41 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: let _unused = stats_repository .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) .await; + + let 
_unused = stats_repository + .increment_counter( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL), + &LabelSet::default(), + now, + ) + .await; } // Peer events Event::PeerAdded { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer added", ); + let label_set = label_set_for_peer(&peer); + let _unused = stats_repository - .increment_gauge( - &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), - &label_set_for_peer(&peer), - now, - ) + .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), &label_set, now) + .await; + + let _unused = stats_repository + .increment_counter(&metric_name!(TORRENT_REPOSITORY_PEERS_ADDED_TOTAL), &label_set, now) .await; } Event::PeerRemoved { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer removed", ); + let label_set = label_set_for_peer(&peer); + let _unused = stats_repository - .decrement_gauge( - &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), - &label_set_for_peer(&peer), - now, - ) + .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), &label_set, now) + .await; + + let _unused = stats_repository + .increment_counter(&metric_name!(TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL), &label_set, now) .await; } Event::PeerUpdated { @@ -76,6 +98,12 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: ) .await; } + + let label_set = label_set_for_peer(&new_peer); + + let _unused = stats_repository + .increment_counter(&metric_name!(TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL), &label_set, now) + .await; } Event::PeerDownloadCompleted { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); @@ -92,7 +120,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: } /// Returns the label set to be included in the metrics for the given peer. 
-fn label_set_for_peer(peer: &Peer) -> LabelSet { +pub(crate) fn label_set_for_peer(peer: &Peer) -> LabelSet { if peer.is_seeder() { (label_name!("peer_role"), LabelValue::new("seeder")).into() } else { @@ -135,7 +163,7 @@ mod tests { opposite_role_peer } - async fn expect_counter_metric_to_be( + pub async fn expect_counter_metric_to_be( stats_repository: &Arc, metric_name: &MetricName, label_set: &LabelSet, @@ -186,9 +214,11 @@ mod tests { use crate::event::Event; use crate::statistics::event::handler::handle_event; - use crate::statistics::event::handler::tests::expect_gauge_metric_to_be; + use crate::statistics::event::handler::tests::{expect_counter_metric_to_be, expect_gauge_metric_to_be}; use crate::statistics::repository::Repository; - use crate::statistics::TORRENT_REPOSITORY_TORRENTS_TOTAL; + use crate::statistics::{ + TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL, TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, + }; use crate::tests::{sample_info_hash, sample_peer}; use crate::CurrentClock; @@ -242,9 +272,73 @@ mod tests { expect_gauge_metric_to_be(&stats_repository, &metric_name, &label_set, 0.0).await; } + + #[tokio::test] + async fn it_should_increment_the_number_of_torrents_added_when_a_torrent_added_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + handle_event( + Event::TorrentAdded { + info_hash: sample_info_hash(), + announcement: sample_peer(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + &metric_name!(TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL), + &LabelSet::default(), + 1, + ) + .await; + } + + #[tokio::test] + async fn it_should_increment_the_number_of_torrents_removed_when_a_torrent_removed_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + handle_event( + Event::TorrentRemoved { + info_hash: 
sample_info_hash(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + &metric_name!(TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL), + &LabelSet::default(), + 1, + ) + .await; + } } mod for_peer_metrics { + use std::sync::Arc; + + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::metric_name; + + use crate::event::Event; + use crate::statistics::event::handler::tests::expect_counter_metric_to_be; + use crate::statistics::event::handler::{handle_event, label_set_for_peer}; + use crate::statistics::repository::Repository; + use crate::statistics::{ + TORRENT_REPOSITORY_PEERS_ADDED_TOTAL, TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL, TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL, + }; + use crate::tests::{sample_info_hash, sample_peer}; + use crate::CurrentClock; mod peer_connections_total { @@ -383,6 +477,88 @@ mod tests { } } + #[tokio::test] + async fn it_should_increment_the_number_of_peers_added_when_a_peer_added_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + let peer = sample_peer(); + + handle_event( + Event::PeerAdded { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + &metric_name!(TORRENT_REPOSITORY_PEERS_ADDED_TOTAL), + &label_set_for_peer(&peer), + 1, + ) + .await; + } + + #[tokio::test] + async fn it_should_increment_the_number_of_peers_removed_when_a_peer_removed_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + let peer = sample_peer(); + + handle_event( + Event::PeerRemoved { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + 
&metric_name!(TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL), + &label_set_for_peer(&peer), + 1, + ) + .await; + } + + #[tokio::test] + async fn it_should_increment_the_number_of_peers_updated_when_a_peer_updated_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + let new_peer = sample_peer(); + + handle_event( + Event::PeerUpdated { + info_hash: sample_info_hash(), + old_peer: sample_peer(), + new_peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + &metric_name!(TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL), + &label_set_for_peer(&new_peer), + 1, + ) + .await; + } + mod torrent_downloads_total { use std::sync::Arc; diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index 18dcf83ea..7d3ad85ce 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -9,11 +9,18 @@ use torrust_tracker_metrics::unit::Unit; // Torrent metrics +const TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL: &str = "torrent_repository_torrents_added_total"; +const TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL: &str = "torrent_repository_torrents_removed_total"; + const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; // Peers metrics +const TORRENT_REPOSITORY_PEERS_ADDED_TOTAL: &str = "torrent_repository_peers_added_total"; +const TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL: &str = "torrent_repository_peers_removed_total"; +const TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL: &str = "torrent_repository_peers_updated_total"; + const TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL: &str = "torrent_repository_peer_connections_total"; const TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL: &str = "torrent_repository_unique_peers_total"; // todo: not 
implemented yet @@ -23,6 +30,18 @@ pub fn describe_metrics() -> Metrics { // Torrent metrics + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of torrents added.")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of torrents removed.")), + ); + metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), Some(Unit::Count), @@ -32,13 +51,29 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new( - "The total number of torrent downloads (since the tracker process started).", - )), + Some(&MetricDescription::new("The total number of torrent downloads.")), ); // Peers metrics + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_PEERS_ADDED_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of peers added.")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of peers removed.")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of peers updated.")), + ); + metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), Some(Unit::Count), From 260f7ffbe557d84ae400f152c4fc3c9980eb4b27 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 20 May 2025 12:07:45 +0100 Subject: [PATCH 069/247] feat: [#1523] add new metric: number of inactive peers The metric is added to the `torrent-repository` package. 
The metric in Prometheus format: ``` torrent_repository_peers_inactive_total{} 0 ``` It was not included as a new label in the number of peers because it can't be calculated from current events. New inactivity events could have been added but the solution was much more complex than this and having two metrics counting peers is not so bad. The discarded alternative was addinga new label por satte (`active`, `inactive`). --- Cargo.lock | 1 + packages/torrent-repository/Cargo.toml | 1 + .../torrent-repository/src/statistics/mod.rs | 8 +++ .../src/statistics/peers_inactivity_update.rs | 72 +++++++++++++++++++ .../src/statistics/repository.rs | 25 +++++++ packages/torrent-repository/src/swarm.rs | 24 +++++++ packages/torrent-repository/src/swarms.rs | 28 ++++++++ packages/tracker-core/src/torrent/manager.rs | 10 ++- src/app.rs | 19 ++++- src/bootstrap/jobs/mod.rs | 1 + src/bootstrap/jobs/peers_inactivity_update.rs | 27 +++++++ 11 files changed, 211 insertions(+), 5 deletions(-) create mode 100644 packages/torrent-repository/src/statistics/peers_inactivity_update.rs create mode 100644 src/bootstrap/jobs/peers_inactivity_update.rs diff --git a/Cargo.lock b/Cargo.lock index ab898e327..6e4ab415f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4832,6 +4832,7 @@ dependencies = [ "aquatic_udp_protocol", "async-std", "bittorrent-primitives", + "chrono", "criterion", "crossbeam-skiplist", "futures", diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 98ae5817d..510a59e9d 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -18,6 +18,7 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" +chrono = { version = "0", default-features = false, features = ["clock"] } crossbeam-skiplist = "0" futures = "0" serde = { version = "1.0.219", features = ["derive"] } diff --git a/packages/torrent-repository/src/statistics/mod.rs 
b/packages/torrent-repository/src/statistics/mod.rs index 7d3ad85ce..0f8a839ca 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -1,5 +1,6 @@ pub mod event; pub mod metrics; +pub mod peers_inactivity_update; pub mod repository; use metrics::Metrics; @@ -23,6 +24,7 @@ const TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL: &str = "torrent_repository_peers_u const TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL: &str = "torrent_repository_peer_connections_total"; const TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL: &str = "torrent_repository_unique_peers_total"; // todo: not implemented yet +const TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL: &str = "torrent_repository_peers_inactive_total"; #[must_use] pub fn describe_metrics() -> Metrics { @@ -88,5 +90,11 @@ pub fn describe_metrics() -> Metrics { Some(&MetricDescription::new("The total number of unique peers.")), ); + metrics.metric_collection.describe_gauge( + &metric_name!(TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of inactive peers.")), + ); + metrics } diff --git a/packages/torrent-repository/src/statistics/peers_inactivity_update.rs b/packages/torrent-repository/src/statistics/peers_inactivity_update.rs new file mode 100644 index 000000000..e388173a1 --- /dev/null +++ b/packages/torrent-repository/src/statistics/peers_inactivity_update.rs @@ -0,0 +1,72 @@ +//! Job that runs a task on intervals to update peers' inactivity metrics. 
+use std::sync::Arc; + +use chrono::Utc; +use tokio::task::JoinHandle; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use tracing::instrument; + +use super::repository::Repository; +use crate::statistics::TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL; +use crate::{CurrentClock, Swarms}; + +#[must_use] +#[instrument(skip(swarms, stats_repository))] +pub fn start_job( + swarms: &Arc, + stats_repository: &Arc, + inactivity_cutoff: DurationSinceUnixEpoch, +) -> JoinHandle<()> { + let weak_swarms = std::sync::Arc::downgrade(swarms); + let weak_stats_repository = std::sync::Arc::downgrade(stats_repository); + + let interval_in_secs = 15; // todo: make this configurable + + tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval_in_secs); + let mut interval = tokio::time::interval(interval); + interval.tick().await; + + loop { + tokio::select! 
{ + _ = tokio::signal::ctrl_c() => { + tracing::info!("Stopping peers inactivity metrics update job ..."); + break; + } + _ = interval.tick() => { + if let (Some(swarms), Some(stats_repository)) = (weak_swarms.upgrade(), weak_stats_repository.upgrade()) { + let start_time = Utc::now().time(); + + tracing::debug!("Updating peers inactivity metrics (executed every {} secs) ...", interval_in_secs); + + let inactive_peers_total = swarms.count_inactive_peers(inactivity_cutoff).await; + + tracing::info!(inactive_peers_total = inactive_peers_total); + + #[allow(clippy::cast_precision_loss)] + let inactive_peers_total = inactive_peers_total as f64; + + let _unused = stats_repository + .set_gauge( + &metric_name!(TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL), + &LabelSet::default(), + inactive_peers_total, + CurrentClock::now(), + ) + .await; + + tracing::debug!( + "Peers inactivity metrics updated in {} ms", + (Utc::now().time() - start_time).num_milliseconds() + ); + } else { + break; + } + } + } + } + }) +} diff --git a/packages/torrent-repository/src/statistics/repository.rs b/packages/torrent-repository/src/statistics/repository.rs index 1e376faf7..fe1292d00 100644 --- a/packages/torrent-repository/src/statistics/repository.rs +++ b/packages/torrent-repository/src/statistics/repository.rs @@ -57,6 +57,31 @@ impl Repository { result } + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// set the gauge. 
+ pub async fn set_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.set_gauge(metric_name, labels, value, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to set the gauge: {}", err), + } + + result + } + /// # Errors /// /// This function will return an error if the metric collection fails to diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index f25304979..d7a1ede87 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -118,6 +118,14 @@ impl Swarm { (seeders, leechers) } + #[must_use] + pub fn count_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> usize { + self.peers + .iter() + .filter(|(_, peer)| peer::ReadInfo::get_updated(&**peer) <= current_cutoff) + .count() + } + #[must_use] pub fn len(&self) -> usize { self.peers.len() @@ -435,6 +443,22 @@ mod tests { assert_eq!(swarm.peers_excluding(&peer2.peer_addr, None), [Arc::new(peer1)]); } + #[tokio::test] + async fn it_should_count_inactive_peers() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut downloads_increased = false; + let one_second = DurationSinceUnixEpoch::new(1, 0); + + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + + let inactive_peers_total = swarm.count_inactive_peers(last_update_time + one_second); + + assert_eq!(inactive_peers_total, 1); + } + #[tokio::test] async fn it_should_remove_inactive_peers() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); diff --git a/packages/torrent-repository/src/swarms.rs 
b/packages/torrent-repository/src/swarms.rs index ac2490853..811bf6a50 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -248,6 +248,18 @@ impl Swarms { } } + /// Counts the number of inactive peers across all torrents. + pub async fn count_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> usize { + let mut inactive_peers_total = 0; + + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock().await; + inactive_peers_total += swarm.count_inactive_peers(current_cutoff); + } + + inactive_peers_total + } + /// Removes inactive peers from all torrent entries. /// /// A peer is considered inactive if its last update timestamp is older than @@ -705,6 +717,22 @@ mod tests { assert!(swarms.get(&info_hash).is_none()); } + #[tokio::test] + async fn it_should_count_inactive_peers() { + let swarms = Arc::new(Swarms::default()); + + let info_hash = sample_info_hash(); + let mut peer = sample_peer(); + peer.updated = DurationSinceUnixEpoch::new(0, 0); + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + // Cut off time is 1 second after the peer was updated + let inactive_peers_total = swarms.count_inactive_peers(peer.updated.add(Duration::from_secs(1))).await; + + assert_eq!(inactive_peers_total, 1); + } + #[tokio::test] async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { let swarms = Arc::new(Swarms::default()); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index bc193bd4f..bf73f7e8b 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -4,6 +4,7 @@ use std::time::Duration; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Core; +use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::in_memory::InMemoryTorrentRepository; use 
super::repository::persisted::DatabasePersistentTorrentRepository; @@ -103,10 +104,13 @@ impl TorrentsManager { } async fn remove_inactive_peers(&self) { - let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) - .unwrap_or_default(); + self.in_memory_torrent_repository + .remove_inactive_peers(self.current_cutoff()) + .await; + } - self.in_memory_torrent_repository.remove_inactive_peers(current_cutoff).await; + fn current_cutoff(&self) -> DurationSinceUnixEpoch { + CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))).unwrap_or_default() } async fn remove_peerless_torrents(&self) { diff --git a/src/app.rs b/src/app.rs index ca8b7a5c3..1c2d9387e 100644 --- a/src/app.rs +++ b/src/app.rs @@ -27,7 +27,9 @@ use torrust_tracker_configuration::{Configuration, HttpTracker, UdpTracker}; use tracing::instrument; use crate::bootstrap::jobs::manager::JobManager; -use crate::bootstrap::jobs::{self, health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::bootstrap::jobs::{ + self, health_check_api, http_tracker, peers_inactivity_update, torrent_cleanup, tracker_apis, udp_tracker, +}; use crate::bootstrap::{self}; use crate::container::AppContainer; @@ -79,8 +81,11 @@ async fn start_jobs(config: &Configuration, app_container: &Arc) - start_the_udp_instances(config, app_container, &mut job_manager).await; start_the_http_instances(config, app_container, &mut job_manager).await; - start_the_http_api(config, app_container, &mut job_manager).await; + start_torrent_cleanup(config, app_container, &mut job_manager); + start_peers_inactivity_update(config, app_container, &mut job_manager); + + start_the_http_api(config, app_container, &mut job_manager).await; start_health_check_api(config, app_container, &mut job_manager).await; job_manager @@ -260,6 +265,16 @@ fn start_torrent_cleanup(config: &Configuration, app_container: &Arc, job_manager: 
&mut JobManager) { + if config.core.tracker_usage_statistics { + let handle = peers_inactivity_update::start_job(config, app_container); + + job_manager.push("peers_inactivity_update", handle); + } else { + tracing::info!("Peers inactivity update job is disabled."); + } +} + async fn start_health_check_api(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { let handle = health_check_api::start_job(&config.health_check_api, app_container.registar.entries()).await; diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index b311c6da6..f593ce808 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -10,6 +10,7 @@ pub mod health_check_api; pub mod http_tracker; pub mod http_tracker_core; pub mod manager; +pub mod peers_inactivity_update; pub mod torrent_cleanup; pub mod torrent_repository; pub mod tracker_apis; diff --git a/src/bootstrap/jobs/peers_inactivity_update.rs b/src/bootstrap/jobs/peers_inactivity_update.rs new file mode 100644 index 000000000..e7939720c --- /dev/null +++ b/src/bootstrap/jobs/peers_inactivity_update.rs @@ -0,0 +1,27 @@ +//! Job that runs a task on intervals to update peers' inactivity metrics. +use std::sync::Arc; +use std::time::Duration; + +use tokio::task::JoinHandle; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; +use crate::CurrentClock; + +#[must_use] +pub fn start_job(config: &Configuration, app_container: &Arc) -> JoinHandle<()> { + torrust_tracker_torrent_repository::statistics::peers_inactivity_update::start_job( + &app_container.torrent_repository_container.swarms.clone(), + &app_container.torrent_repository_container.stats_repository.clone(), + peer_inactivity_cutoff_timestamp(config.core.tracker_policy.max_peer_timeout), + ) +} + +/// Returns the timestamp of the cutoff for inactive peers. 
+/// +/// Peers that has not been updated for more than `max_peer_timeout` seconds are +/// considered inactive. +fn peer_inactivity_cutoff_timestamp(max_peer_timeout: u32) -> Duration { + CurrentClock::now_sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default() +} From 677deacdc419526122eff62973f2685ac976a5eb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 21 May 2025 12:34:12 +0100 Subject: [PATCH 070/247] feat: [#1523] add new metric: number of inactive torrents --- .../statistics/activity_metrics_updater.rs | 104 ++++++++++++++++++ .../torrent-repository/src/statistics/mod.rs | 9 +- .../src/statistics/peers_inactivity_update.rs | 72 ------------ packages/torrent-repository/src/swarm.rs | 35 ++++++ packages/torrent-repository/src/swarms.rs | 51 +++++++++ src/app.rs | 4 +- ..._update.rs => activity_metrics_updater.rs} | 4 +- src/bootstrap/jobs/mod.rs | 2 +- 8 files changed, 203 insertions(+), 78 deletions(-) create mode 100644 packages/torrent-repository/src/statistics/activity_metrics_updater.rs delete mode 100644 packages/torrent-repository/src/statistics/peers_inactivity_update.rs rename src/bootstrap/jobs/{peers_inactivity_update.rs => activity_metrics_updater.rs} (84%) diff --git a/packages/torrent-repository/src/statistics/activity_metrics_updater.rs b/packages/torrent-repository/src/statistics/activity_metrics_updater.rs new file mode 100644 index 000000000..2dfa5fb4e --- /dev/null +++ b/packages/torrent-repository/src/statistics/activity_metrics_updater.rs @@ -0,0 +1,104 @@ +//! Job that runs a task on intervals to update peers' activity metrics. 
+use std::sync::Arc; + +use chrono::Utc; +use tokio::task::JoinHandle; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use tracing::instrument; + +use super::repository::Repository; +use crate::statistics::{TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL, TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL}; +use crate::{CurrentClock, Swarms}; + +#[must_use] +#[instrument(skip(swarms, stats_repository))] +pub fn start_job( + swarms: &Arc, + stats_repository: &Arc, + inactivity_cutoff: DurationSinceUnixEpoch, +) -> JoinHandle<()> { + let weak_swarms = std::sync::Arc::downgrade(swarms); + let weak_stats_repository = std::sync::Arc::downgrade(stats_repository); + + let interval_in_secs = 15; // todo: make this configurable + + tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval_in_secs); + let mut interval = tokio::time::interval(interval); + interval.tick().await; + + loop { + tokio::select! 
{ + _ = tokio::signal::ctrl_c() => { + tracing::info!("Stopping peers activity metrics update job (ctrl-c signal received) ..."); + break; + } + _ = interval.tick() => { + if let (Some(swarms), Some(stats_repository)) = (weak_swarms.upgrade(), weak_stats_repository.upgrade()) { + update_activity_metrics(interval_in_secs, &swarms, &stats_repository, inactivity_cutoff).await; + } else { + tracing::info!("Stopping peers activity metrics update job (can't upgrade weak pointers) ..."); + break; + } + } + } + } + }) +} + +async fn update_activity_metrics( + interval_in_secs: u64, + swarms: &Arc, + stats_repository: &Arc, + inactivity_cutoff: DurationSinceUnixEpoch, +) { + let start_time = Utc::now().time(); + + tracing::debug!( + "Updating peers and torrents activity metrics (executed every {} secs) ...", + interval_in_secs + ); + + let activity_metadata = swarms.get_activity_metadata(inactivity_cutoff).await; + + activity_metadata.log(); + + update_inactive_peers_total(stats_repository, activity_metadata.inactive_peers_total).await; + update_inactive_torrents_total(stats_repository, activity_metadata.inactive_torrents_total).await; + + tracing::debug!( + "Peers and torrents activity metrics updated in {} ms", + (Utc::now().time() - start_time).num_milliseconds() + ); +} + +async fn update_inactive_peers_total(stats_repository: &Arc, inactive_peers_total: usize) { + #[allow(clippy::cast_precision_loss)] + let inactive_peers_total = inactive_peers_total as f64; + + let _unused = stats_repository + .set_gauge( + &metric_name!(TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL), + &LabelSet::default(), + inactive_peers_total, + CurrentClock::now(), + ) + .await; +} + +async fn update_inactive_torrents_total(stats_repository: &Arc, inactive_torrents_total: usize) { + #[allow(clippy::cast_precision_loss)] + let inactive_torrents_total = inactive_torrents_total as f64; + + let _unused = stats_repository + .set_gauge( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL), + 
&LabelSet::default(), + inactive_torrents_total, + CurrentClock::now(), + ) + .await; +} diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index 0f8a839ca..cfc252e34 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -1,6 +1,6 @@ +pub mod activity_metrics_updater; pub mod event; pub mod metrics; -pub mod peers_inactivity_update; pub mod repository; use metrics::Metrics; @@ -15,6 +15,7 @@ const TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL: &str = "torrent_repository_torr const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; +const TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL: &str = "torrent_repository_torrents_inactive_total"; // Peers metrics @@ -56,6 +57,12 @@ pub fn describe_metrics() -> Metrics { Some(&MetricDescription::new("The total number of torrent downloads.")), ); + metrics.metric_collection.describe_gauge( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of inactive torrents.")), + ); + // Peers metrics metrics.metric_collection.describe_counter( diff --git a/packages/torrent-repository/src/statistics/peers_inactivity_update.rs b/packages/torrent-repository/src/statistics/peers_inactivity_update.rs deleted file mode 100644 index e388173a1..000000000 --- a/packages/torrent-repository/src/statistics/peers_inactivity_update.rs +++ /dev/null @@ -1,72 +0,0 @@ -//! Job that runs a task on intervals to update peers' inactivity metrics. 
-use std::sync::Arc; - -use chrono::Utc; -use tokio::task::JoinHandle; -use torrust_tracker_clock::clock::Time; -use torrust_tracker_metrics::label::LabelSet; -use torrust_tracker_metrics::metric_name; -use torrust_tracker_primitives::DurationSinceUnixEpoch; -use tracing::instrument; - -use super::repository::Repository; -use crate::statistics::TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL; -use crate::{CurrentClock, Swarms}; - -#[must_use] -#[instrument(skip(swarms, stats_repository))] -pub fn start_job( - swarms: &Arc, - stats_repository: &Arc, - inactivity_cutoff: DurationSinceUnixEpoch, -) -> JoinHandle<()> { - let weak_swarms = std::sync::Arc::downgrade(swarms); - let weak_stats_repository = std::sync::Arc::downgrade(stats_repository); - - let interval_in_secs = 15; // todo: make this configurable - - tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval_in_secs); - let mut interval = tokio::time::interval(interval); - interval.tick().await; - - loop { - tokio::select! 
{ - _ = tokio::signal::ctrl_c() => { - tracing::info!("Stopping peers inactivity metrics update job ..."); - break; - } - _ = interval.tick() => { - if let (Some(swarms), Some(stats_repository)) = (weak_swarms.upgrade(), weak_stats_repository.upgrade()) { - let start_time = Utc::now().time(); - - tracing::debug!("Updating peers inactivity metrics (executed every {} secs) ...", interval_in_secs); - - let inactive_peers_total = swarms.count_inactive_peers(inactivity_cutoff).await; - - tracing::info!(inactive_peers_total = inactive_peers_total); - - #[allow(clippy::cast_precision_loss)] - let inactive_peers_total = inactive_peers_total as f64; - - let _unused = stats_repository - .set_gauge( - &metric_name!(TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL), - &LabelSet::default(), - inactive_peers_total, - CurrentClock::now(), - ) - .await; - - tracing::debug!( - "Peers inactivity metrics updated in {} ms", - (Utc::now().time() - start_time).num_milliseconds() - ); - } else { - break; - } - } - } - } - }) -} diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index d7a1ede87..b9076289b 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -126,6 +126,17 @@ impl Swarm { .count() } + #[must_use] + pub fn get_activity_metadata(&self, current_cutoff: DurationSinceUnixEpoch) -> ActivityMetadata { + let inactive_peers_total = self.count_inactive_peers(current_cutoff); + + let active_peers_total = self.len() - inactive_peers_total; + + let is_active = active_peers_total > 0; + + ActivityMetadata::new(is_active, active_peers_total, inactive_peers_total) + } + #[must_use] pub fn len(&self) -> usize { self.peers.len() @@ -296,6 +307,30 @@ impl Swarm { } } +#[derive(Clone)] +pub struct ActivityMetadata { + /// Indicates if the swarm is active. It's inactive if there are no active + /// peers. + pub is_active: bool, + + /// The number of active peers in the swarm. 
+ pub active_peers_total: usize, + + /// The number of inactive peers in the swarm. + pub inactive_peers_total: usize, +} + +impl ActivityMetadata { + #[must_use] + pub fn new(is_active: bool, active_peers_total: usize, inactive_peers_total: usize) -> Self { + Self { + is_active, + active_peers_total, + inactive_peers_total, + } + } +} + #[cfg(test)] mod tests { diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 811bf6a50..36f83070d 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -248,6 +248,32 @@ impl Swarms { } } + pub async fn get_activity_metadata(&self, current_cutoff: DurationSinceUnixEpoch) -> AggregateActivityMetadata { + let mut active_peers_total = 0; + let mut inactive_peers_total = 0; + let mut active_torrents_total = 0; + + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock().await; + + let activity_metadata = swarm.get_activity_metadata(current_cutoff); + + if activity_metadata.is_active { + active_torrents_total += 1; + } + + active_peers_total += activity_metadata.active_peers_total; + inactive_peers_total += activity_metadata.inactive_peers_total; + } + + AggregateActivityMetadata { + active_peers_total, + inactive_peers_total, + active_torrents_total, + inactive_torrents_total: self.len() - active_torrents_total, + } + } + /// Counts the number of inactive peers across all torrents. pub async fn count_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> usize { let mut inactive_peers_total = 0; @@ -446,6 +472,31 @@ impl Swarms { #[derive(thiserror::Error, Debug, Clone)] pub enum Error {} +#[derive(Clone, Debug, Default)] +pub struct AggregateActivityMetadata { + /// The number of active peers in all swarms. + pub active_peers_total: usize, + + /// The number of inactive peers in all swarms. + pub inactive_peers_total: usize, + + /// The number of active torrents. 
+ pub active_torrents_total: usize, + + /// The number of inactive torrents. + pub inactive_torrents_total: usize, +} + +impl AggregateActivityMetadata { + pub fn log(&self) { + tracing::info!( + active_peers_total = self.active_peers_total, + inactive_peers_total = self.inactive_peers_total, + active_torrents_total = self.active_torrents_total, + inactive_torrents_total = self.inactive_torrents_total + ); + } +} #[cfg(test)] mod tests { diff --git a/src/app.rs b/src/app.rs index 1c2d9387e..5180e4583 100644 --- a/src/app.rs +++ b/src/app.rs @@ -28,7 +28,7 @@ use tracing::instrument; use crate::bootstrap::jobs::manager::JobManager; use crate::bootstrap::jobs::{ - self, health_check_api, http_tracker, peers_inactivity_update, torrent_cleanup, tracker_apis, udp_tracker, + self, activity_metrics_updater, health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker, }; use crate::bootstrap::{self}; use crate::container::AppContainer; @@ -267,7 +267,7 @@ fn start_torrent_cleanup(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { if config.core.tracker_usage_statistics { - let handle = peers_inactivity_update::start_job(config, app_container); + let handle = activity_metrics_updater::start_job(config, app_container); job_manager.push("peers_inactivity_update", handle); } else { diff --git a/src/bootstrap/jobs/peers_inactivity_update.rs b/src/bootstrap/jobs/activity_metrics_updater.rs similarity index 84% rename from src/bootstrap/jobs/peers_inactivity_update.rs rename to src/bootstrap/jobs/activity_metrics_updater.rs index e7939720c..7411c05cf 100644 --- a/src/bootstrap/jobs/peers_inactivity_update.rs +++ b/src/bootstrap/jobs/activity_metrics_updater.rs @@ -1,4 +1,4 @@ -//! Job that runs a task on intervals to update peers' inactivity metrics. +//! Job that runs a task on intervals to update peers' activity metrics. 
use std::sync::Arc; use std::time::Duration; @@ -11,7 +11,7 @@ use crate::CurrentClock; #[must_use] pub fn start_job(config: &Configuration, app_container: &Arc) -> JoinHandle<()> { - torrust_tracker_torrent_repository::statistics::peers_inactivity_update::start_job( + torrust_tracker_torrent_repository::statistics::activity_metrics_updater::start_job( &app_container.torrent_repository_container.swarms.clone(), &app_container.torrent_repository_container.stats_repository.clone(), peer_inactivity_cutoff_timestamp(config.core.tracker_policy.max_peer_timeout), diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index f593ce808..c8d7a8598 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -6,11 +6,11 @@ //! 2. Launch all the application services as concurrent jobs. //! //! This modules contains all the functions needed to start those jobs. +pub mod activity_metrics_updater; pub mod health_check_api; pub mod http_tracker; pub mod http_tracker_core; pub mod manager; -pub mod peers_inactivity_update; pub mod torrent_cleanup; pub mod torrent_repository; pub mod tracker_apis; From 3a23a38b38c059311b5213e8e6055ac809d6f648 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 21 May 2025 16:56:39 +0100 Subject: [PATCH 071/247] fix: tracing message --- src/bootstrap/jobs/torrent_repository.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bootstrap/jobs/torrent_repository.rs b/src/bootstrap/jobs/torrent_repository.rs index 2125de554..ea0d215ee 100644 --- a/src/bootstrap/jobs/torrent_repository.rs +++ b/src/bootstrap/jobs/torrent_repository.rs @@ -14,7 +14,7 @@ pub fn start_event_listener(config: &Configuration, app_container: &Arc Date: Wed, 21 May 2025 17:11:52 +0100 Subject: [PATCH 072/247] feat: [#1524] listens to torrent-repository events in the tracker-core pkg This will enable udpating stats (number of torrent downloads per torrent) from the event handler (persisting in the DB). 
And after that, I will enable adding labeled metrics.
---
 Cargo.lock                                    |  1 +
 packages/tracker-core/Cargo.toml              |  1 +
 packages/tracker-core/src/lib.rs              |  3 ++
 .../src/statistics/event/handler.rs           | 32 ++++++++++++
 .../src/statistics/event/listener.rs          | 52 +++++++++++++++++++
 .../tracker-core/src/statistics/event/mod.rs  |  2 +
 packages/tracker-core/src/statistics/mod.rs   |  1 +
 src/app.rs                                    |  9 ++++
 src/bootstrap/jobs/mod.rs                     |  1 +
 src/bootstrap/jobs/tracker_core.rs            | 21 ++++++++
 10 files changed, 123 insertions(+)
 create mode 100644 packages/tracker-core/src/statistics/event/handler.rs
 create mode 100644 packages/tracker-core/src/statistics/event/listener.rs
 create mode 100644 packages/tracker-core/src/statistics/event/mod.rs
 create mode 100644 packages/tracker-core/src/statistics/mod.rs
 create mode 100644 src/bootstrap/jobs/tracker_core.rs

diff --git a/Cargo.lock b/Cargo.lock
index 6e4ab415f..5415149e8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -676,6 +676,7 @@ dependencies = [
  "torrust-rest-tracker-api-client",
  "torrust-tracker-clock",
  "torrust-tracker-configuration",
+ "torrust-tracker-events",
  "torrust-tracker-located-error",
  "torrust-tracker-primitives",
  "torrust-tracker-test-helpers",
diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml
index ac1cee88d..3c89505b2 100644
--- a/packages/tracker-core/Cargo.toml
+++ b/packages/tracker-core/Cargo.toml
@@ -29,6 +29,7 @@ thiserror = "2"
 tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] }
 torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" }
 torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" }
+torrust-tracker-events = { version = "3.0.0-develop", path = "../events" }
 torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" }
 torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" }
 torrust-tracker-torrent-repository = { version = "3.0.0-develop", path
= "../torrent-repository" } diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index 82ebac3c6..dacf41383 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -124,6 +124,7 @@ pub mod container; pub mod databases; pub mod error; pub mod scrape_handler; +pub mod statistics; pub mod torrent; pub mod whitelist; @@ -156,6 +157,8 @@ pub(crate) type CurrentClock = clock::Working; #[allow(dead_code)] pub(crate) type CurrentClock = clock::Stopped; +pub const TRACKER_CORE_LOG_TARGET: &str = "TRACKER_CORE"; + #[cfg(test)] mod tests { mod the_tracker { diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs new file mode 100644 index 000000000..bdd4d414b --- /dev/null +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -0,0 +1,32 @@ +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use torrust_tracker_torrent_repository::event::Event; + +pub async fn handle_event(event: Event, _now: DurationSinceUnixEpoch) { + match event { + // Torrent events + Event::TorrentAdded { info_hash, .. 
} => { + tracing::debug!(info_hash = ?info_hash, "Torrent added",); + } + Event::TorrentRemoved { info_hash } => { + tracing::debug!(info_hash = ?info_hash, "Torrent removed",); + } + + // Peer events + Event::PeerAdded { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer added", ); + } + Event::PeerRemoved { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer removed", ); + } + Event::PeerUpdated { + info_hash, + old_peer, + new_peer, + } => { + tracing::debug!(info_hash = ?info_hash, old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated"); + } + Event::PeerDownloadCompleted { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); + } + } +} diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs new file mode 100644 index 000000000..2fe068b76 --- /dev/null +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -0,0 +1,52 @@ +use tokio::task::JoinHandle; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; +use torrust_tracker_torrent_repository::event::receiver::Receiver; + +use super::handler::handle_event; +use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; + +#[must_use] +pub fn run_event_listener(receiver: Receiver) -> JoinHandle<()> { + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting torrent repository event listener"); + + tokio::spawn(async move { + dispatch_events(receiver).await; + + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository listener finished"); + }) +} + +async fn dispatch_events(mut receiver: Receiver) { + let shutdown_signal = tokio::signal::ctrl_c(); + + tokio::pin!(shutdown_signal); + + loop { + tokio::select! 
{ + biased; + + _ = &mut shutdown_signal => { + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Received Ctrl+C, shutting down torrent repository event listener"); + break; + } + + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository event receiver closed"); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository event receiver lagged by {} events", n); + } + } + } + } + } + } + } +} diff --git a/packages/tracker-core/src/statistics/event/mod.rs b/packages/tracker-core/src/statistics/event/mod.rs new file mode 100644 index 000000000..dae683398 --- /dev/null +++ b/packages/tracker-core/src/statistics/event/mod.rs @@ -0,0 +1,2 @@ +pub mod handler; +pub mod listener; diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs new file mode 100644 index 000000000..53f112654 --- /dev/null +++ b/packages/tracker-core/src/statistics/mod.rs @@ -0,0 +1 @@ +pub mod event; diff --git a/src/app.rs b/src/app.rs index 5180e4583..3b6abb86f 100644 --- a/src/app.rs +++ b/src/app.rs @@ -75,6 +75,7 @@ async fn start_jobs(config: &Configuration, app_container: &Arc) - let mut job_manager = JobManager::new(); start_torrent_repository_event_listener(config, app_container, &mut job_manager); + start_tracker_core_event_listener(config, app_container, &mut job_manager); start_http_core_event_listener(config, app_container, &mut job_manager); start_udp_core_event_listener(config, app_container, &mut job_manager); start_udp_server_event_listener(config, app_container, &mut job_manager); @@ -145,6 +146,14 @@ fn start_torrent_repository_event_listener( } } +fn start_tracker_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { + let opt_handle = 
jobs::tracker_core::start_event_listener(config, app_container); + + if let Some(handle) = opt_handle { + job_manager.push("tracker_core_event_listener", handle); + } +} + fn start_http_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { let opt_handle = jobs::http_tracker_core::start_event_listener(config, app_container); diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index c8d7a8598..0e9c912af 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -14,6 +14,7 @@ pub mod manager; pub mod torrent_cleanup; pub mod torrent_repository; pub mod tracker_apis; +pub mod tracker_core; pub mod udp_tracker; pub mod udp_tracker_core; pub mod udp_tracker_server; diff --git a/src/bootstrap/jobs/tracker_core.rs b/src/bootstrap/jobs/tracker_core.rs new file mode 100644 index 000000000..28eb745c2 --- /dev/null +++ b/src/bootstrap/jobs/tracker_core.rs @@ -0,0 +1,21 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; + +pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { + // todo: enable this when labeled metrics are implemented. 
+ //if config.core.tracker_usage_statistics || config.core.tracker_policy.persistent_torrent_completed_stat { + if config.core.tracker_policy.persistent_torrent_completed_stat { + let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( + app_container.torrent_repository_container.event_bus.receiver(), + ); + + Some(job) + } else { + tracing::info!("Tracker core event listener job is disabled."); + None + } +} From 896875738f62b863bb87f27558f0e2344703110a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 21 May 2025 17:26:05 +0100 Subject: [PATCH 073/247] refactor: extract method JobManger::push_opt --- src/app.rs | 45 ++++++++++++++++------------------- src/bootstrap/jobs/manager.rs | 6 +++++ 2 files changed, 26 insertions(+), 25 deletions(-) diff --git a/src/app.rs b/src/app.rs index 3b6abb86f..5037ad761 100644 --- a/src/app.rs +++ b/src/app.rs @@ -139,43 +139,38 @@ fn start_torrent_repository_event_listener( app_container: &Arc, job_manager: &mut JobManager, ) { - let opt_handle = jobs::torrent_repository::start_event_listener(config, app_container); - - if let Some(handle) = opt_handle { - job_manager.push("torrent_repository_event_listener", handle); - } + job_manager.push_opt( + "torrent_repository_event_listener", + jobs::torrent_repository::start_event_listener(config, app_container), + ); } fn start_tracker_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { - let opt_handle = jobs::tracker_core::start_event_listener(config, app_container); - - if let Some(handle) = opt_handle { - job_manager.push("tracker_core_event_listener", handle); - } + job_manager.push_opt( + "tracker_core_event_listener", + jobs::tracker_core::start_event_listener(config, app_container), + ); } fn start_http_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { - let opt_handle = jobs::http_tracker_core::start_event_listener(config, app_container); - - if let 
Some(handle) = opt_handle { - job_manager.push("http_core_event_listener", handle); - } + job_manager.push_opt( + "http_core_event_listener", + jobs::http_tracker_core::start_event_listener(config, app_container), + ); } fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { - let opt_handle = jobs::udp_tracker_core::start_event_listener(config, app_container); - - if let Some(handle) = opt_handle { - job_manager.push("udp_core_event_listener", handle); - } + job_manager.push_opt( + "udp_core_event_listener", + jobs::udp_tracker_core::start_event_listener(config, app_container), + ); } fn start_udp_server_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { - let opt_handle = jobs::udp_tracker_server::start_event_listener(config, app_container); - - if let Some(handle) = opt_handle { - job_manager.push("udp_server_event_listener", handle); - } + job_manager.push_opt( + "udp_server_event_listener", + jobs::udp_tracker_server::start_event_listener(config, app_container), + ); } async fn start_the_udp_instances(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { diff --git a/src/bootstrap/jobs/manager.rs b/src/bootstrap/jobs/manager.rs index 5beab3224..53733844b 100644 --- a/src/bootstrap/jobs/manager.rs +++ b/src/bootstrap/jobs/manager.rs @@ -36,6 +36,12 @@ impl JobManager { self.jobs.push(Job::new(name, handle)); } + pub fn push_opt>(&mut self, name: N, handle: Option>) { + if let Some(handle) = handle { + self.push(name, handle); + } + } + /// Waits sequentially for all jobs to complete, with a graceful timeout per /// job. 
pub async fn wait_for_all(mut self, grace_period: Duration) { From e90585af80fc7a153708731ef1d5488da4e549d6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 21 May 2025 18:16:39 +0100 Subject: [PATCH 074/247] refactor: [#1524] move total downloads udpate from announce command to event handler --- packages/tracker-core/src/announce_handler.rs | 11 ++++++----- .../src/statistics/event/handler.rs | 19 ++++++++++++++++++- .../src/statistics/event/listener.rs | 16 ++++++++++++---- src/bootstrap/jobs/tracker_core.rs | 1 + 4 files changed, 37 insertions(+), 10 deletions(-) diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index a2e8db743..61e5de125 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -163,6 +163,11 @@ impl AnnounceHandler { ) -> Result { self.whitelist_authorization.authorize(info_hash).await?; + // This will be removed in the future. + // See https://github.com/torrust/torrust-tracker/issues/1502 + // There will be a persisted metric for counting the total number of + // downloads across all torrents. The in-memory metric will count only + // the number of downloads during the current tracker uptime. let opt_persistent_torrent = if self.config.tracker_policy.persistent_torrent_completed_stat { self.db_torrent_repository.load(info_hash)? 
} else { @@ -171,15 +176,11 @@ impl AnnounceHandler { peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); - let number_of_downloads_increased = self + let _number_of_downloads_increased = self .in_memory_torrent_repository .upsert_peer(info_hash, peer, opt_persistent_torrent) .await; - if self.config.tracker_policy.persistent_torrent_completed_stat && number_of_downloads_increased { - self.db_torrent_repository.increase_number_of_downloads(info_hash)?; - } - Ok(self.build_announce_data(info_hash, peer, peers_wanted).await) } diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index bdd4d414b..7b6ce83b7 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -1,7 +1,15 @@ +use std::sync::Arc; + use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_torrent_repository::event::Event; -pub async fn handle_event(event: Event, _now: DurationSinceUnixEpoch) { +use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; + +pub async fn handle_event( + event: Event, + db_torrent_repository: &Arc, + _now: DurationSinceUnixEpoch, +) { match event { // Torrent events Event::TorrentAdded { info_hash, .. 
} => { @@ -27,6 +35,15 @@ pub async fn handle_event(event: Event, _now: DurationSinceUnixEpoch) { } Event::PeerDownloadCompleted { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); + + match db_torrent_repository.increase_number_of_downloads(&info_hash) { + Ok(()) => { + tracing::debug!(info_hash = ?info_hash, "Number of downloads increased"); + } + Err(err) => { + tracing::error!(info_hash = ?info_hash, error = ?err, "Failed to increase number of downloads"); + } + } } } } diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index 2fe068b76..e04675092 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -1,23 +1,31 @@ +use std::sync::Arc; + use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; use torrust_tracker_torrent_repository::event::receiver::Receiver; use super::handler::handle_event; +use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; #[must_use] -pub fn run_event_listener(receiver: Receiver) -> JoinHandle<()> { +pub fn run_event_listener( + receiver: Receiver, + db_torrent_repository: &Arc, +) -> JoinHandle<()> { + let db_torrent_repository: Arc = db_torrent_repository.clone(); + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting torrent repository event listener"); tokio::spawn(async move { - dispatch_events(receiver).await; + dispatch_events(receiver, db_torrent_repository).await; tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver) { +async fn dispatch_events(mut receiver: Receiver, db_torrent_repository: Arc) { let shutdown_signal = tokio::signal::ctrl_c(); tokio::pin!(shutdown_signal); @@ -33,7 +41,7 @@ 
async fn dispatch_events(mut receiver: Receiver) { result = receiver.recv() => { match result { - Ok(event) => handle_event(event, CurrentClock::now()).await, + Ok(event) => handle_event(event, &db_torrent_repository, CurrentClock::now()).await, Err(e) => { match e { RecvError::Closed => { diff --git a/src/bootstrap/jobs/tracker_core.rs b/src/bootstrap/jobs/tracker_core.rs index 28eb745c2..bb879db6b 100644 --- a/src/bootstrap/jobs/tracker_core.rs +++ b/src/bootstrap/jobs/tracker_core.rs @@ -11,6 +11,7 @@ pub fn start_event_listener(config: &Configuration, app_container: &Arc Date: Mon, 26 May 2025 09:45:42 +0100 Subject: [PATCH 075/247] refactor: [#1524] remove duplicate code for tracker core container --- packages/tracker-core/tests/integration.rs | 49 ++++------------------ 1 file changed, 7 insertions(+), 42 deletions(-) diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs index 5aaded10a..282dcade5 100644 --- a/packages/tracker-core/tests/integration.rs +++ b/packages/tracker-core/tests/integration.rs @@ -4,17 +4,13 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; -use bittorrent_tracker_core::databases::setup::initialize_database; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use bittorrent_tracker_core::whitelist; -use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use bittorrent_tracker_core::announce_handler::PeersWanted; +use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::peer::Peer; use 
torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; /// # Panics /// @@ -59,41 +55,13 @@ fn remote_client_ip() -> IpAddr { IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) } -struct Container { - pub announce_handler: Arc, - pub scrape_handler: Arc, -} - -impl Container { - pub fn initialize(config: &Core) -> Self { - let database = initialize_database(config); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( - config, - &in_memory_whitelist.clone(), - )); - let announce_handler = Arc::new(AnnounceHandler::new( - config, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - Self { - announce_handler, - scrape_handler, - } - } -} - #[tokio::test] async fn test_announce_and_scrape_requests() { - let config = ephemeral_configuration(); + let config = Arc::new(ephemeral_configuration()); + + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize(config.tracker_usage_statistics.into())); - let container = Container::initialize(&config); + let container = TrackerCoreContainer::initialize_from(&config, &torrent_repository_container); let info_hash = sample_info_hash(); @@ -130,6 +98,3 @@ async fn test_announce_and_scrape_requests() { assert!(scrape_data.files.contains_key(&info_hash)); } - -#[test] -fn test_scrape_request() {} From b05bccdccc90ed73f63ac9f9c61fcbfaa75f7bbf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 May 2025 10:02:49 
+0100 Subject: [PATCH 076/247] refactor: [#1524] integration tests in tracker-core --- packages/tracker-core/tests/integration.rs | 58 +++++++++++++++------- 1 file changed, 39 insertions(+), 19 deletions(-) diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs index 282dcade5..f59b9d185 100644 --- a/packages/tracker-core/tests/integration.rs +++ b/packages/tracker-core/tests/integration.rs @@ -7,6 +7,7 @@ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::PeersWanted; use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_tracker_configuration::Core; +use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; @@ -55,44 +56,63 @@ fn remote_client_ip() -> IpAddr { IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) } -#[tokio::test] -async fn test_announce_and_scrape_requests() { +fn initialize() -> (Arc, Arc, InfoHash, Peer) { let config = Arc::new(ephemeral_configuration()); let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize(config.tracker_usage_statistics.into())); - let container = TrackerCoreContainer::initialize_from(&config, &torrent_repository_container); + let container = Arc::new(TrackerCoreContainer::initialize_from(&config, &torrent_repository_container)); let info_hash = sample_info_hash(); - let mut peer = sample_peer(); + let peer = sample_peer(); - // Announce + (config, container, info_hash, peer) +} - // First announce: download started +async fn announce_peer_started(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) -> AnnounceData { peer.event = AnnounceEvent::Started; - let announce_data = container + + container .announce_handler - .announce(&info_hash, &mut peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) + 
.announce(info_hash, peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) .await - .unwrap(); - - // NOTICE: you don't get back the peer making the request. - assert_eq!(announce_data.peers.len(), 0); - assert_eq!(announce_data.stats.downloaded, 0); + .unwrap() +} - // Second announce: download completed +async fn _announce_peer_completed(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) -> AnnounceData { peer.event = AnnounceEvent::Completed; - let announce_data = container + + container .announce_handler - .announce(&info_hash, &mut peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) + .announce(info_hash, peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) .await - .unwrap(); + .unwrap() +} + +#[tokio::test] +async fn it_should_handle_the_announce_request() { + let (_config, container, info_hash, mut peer) = initialize(); + + let announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; + + assert_eq!(announce_data, AnnounceData::default()); +} + +#[tokio::test] +async fn it_should_not_return_the_peer_making_the_announce_request() { + let (_config, container, info_hash, mut peer) = initialize(); + + let announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; assert_eq!(announce_data.peers.len(), 0); - assert_eq!(announce_data.stats.downloaded, 1); +} + +#[tokio::test] +async fn it_should_handle_the_scrape_request() { + let (_config, container, info_hash, mut peer) = initialize(); - // Scrape + let _announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; let scrape_data = container.scrape_handler.scrape(&vec![info_hash]).await.unwrap(); From ab2f52dd3781d58d56d997b42e185c2f102feafc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 May 2025 12:54:12 +0100 Subject: [PATCH 077/247] fix: [#1524] test (move to integration test) --- packages/tracker-core/src/announce_handler.rs | 77 ----------- packages/tracker-core/src/torrent/manager.rs | 2 + 
.../src/torrent/repository/in_memory.rs | 19 --- packages/tracker-core/tests/integration.rs | 121 +++++++++++++++--- 4 files changed, 106 insertions(+), 113 deletions(-) diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 61e5de125..0a3fef045 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -594,83 +594,6 @@ mod tests { } } - mod handling_torrent_persistence { - - use std::sync::Arc; - - use aquatic_udp_protocol::AnnounceEvent; - use torrust_tracker_test_helpers::configuration; - use torrust_tracker_torrent_repository::Swarms; - - use crate::announce_handler::tests::the_announce_handler::peer_ip; - use crate::announce_handler::{AnnounceHandler, PeersWanted}; - use crate::databases::setup::initialize_database; - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::manager::TorrentsManager; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use crate::whitelist::authorization::WhitelistAuthorization; - use crate::whitelist::repository::in_memory::InMemoryWhitelist; - - #[tokio::test] - async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { - let mut config = configuration::ephemeral_public(); - - config.core.tracker_policy.persistent_torrent_completed_stat = true; - - let database = initialize_database(&config.core); - let swarms = Arc::new(Swarms::default()); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms)); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let torrents_manager = Arc::new(TorrentsManager::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization 
= Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let info_hash = sample_info_hash(); - - let mut peer = sample_peer(); - - peer.event = AnnounceEvent::Started; - let announce_data = announce_handler - .announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) - .await - .unwrap(); - assert_eq!(announce_data.stats.downloaded, 0); - - peer.event = AnnounceEvent::Completed; - let announce_data = announce_handler - .announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) - .await - .unwrap(); - assert_eq!(announce_data.stats.downloaded, 1); - - // Remove the newly updated torrent from memory - let _unused = in_memory_torrent_repository.remove(&info_hash).await; - - torrents_manager.load_torrents_from_database().unwrap(); - - let torrent_entry = in_memory_torrent_repository - .get(&info_hash) - .expect("it should be able to get entry"); - - // It persists the number of completed peers. 
- assert_eq!(torrent_entry.lock().await.metadata().downloaded, 1); - - // It does not persist the peers - assert!(torrent_entry.lock().await.is_empty()); - } - } - mod should_allow_the_client_peers_to_specified_the_number_of_peers_wanted { use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index bf73f7e8b..f463eee98 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -74,6 +74,8 @@ impl TorrentsManager { pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.db_torrent_repository.load_all()?; + println!("Loaded {} persistent torrents from the database", persistent_torrents.len()); + self.in_memory_torrent_repository.import_persistent(&persistent_torrents); Ok(()) diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 311480306..bf8d083f8 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -61,25 +61,6 @@ impl InMemoryTorrentRepository { .expect("Failed to upsert the peer in swarms") } - /// Removes a torrent entry from the repository. - /// - /// This method is only available in tests. It removes the torrent entry - /// associated with the given info hash and returns the removed entry if it - /// existed. - /// - /// # Arguments - /// - /// * `key` - The info hash of the torrent to remove. - /// - /// # Returns - /// - /// An `Option` containing the removed torrent entry if it existed. - #[cfg(test)] - #[must_use] - pub(crate) async fn remove(&self, key: &InfoHash) -> Option { - self.swarms.remove(key).await - } - /// Removes inactive peers from all torrent entries. 
/// /// A peer is considered inactive if its last update timestamp is older than diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs index f59b9d185..7af0ec4fa 100644 --- a/packages/tracker-core/tests/integration.rs +++ b/packages/tracker-core/tests/integration.rs @@ -6,12 +6,15 @@ use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::PeersWanted; use bittorrent_tracker_core::container::TrackerCoreContainer; -use torrust_tracker_configuration::Core; +use tokio::task::yield_now; +use torrust_tracker_configuration::{AnnouncePolicy, Core}; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_torrent_repository::Swarms; /// # Panics /// @@ -56,52 +59,114 @@ fn remote_client_ip() -> IpAddr { IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) } -fn initialize() -> (Arc, Arc, InfoHash, Peer) { - let config = Arc::new(ephemeral_configuration()); - - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize(config.tracker_usage_statistics.into())); - - let container = Arc::new(TrackerCoreContainer::initialize_from(&config, &torrent_repository_container)); +async fn initialize_test_env(core_config: Core) -> (Arc, Arc, Arc, InfoHash, Peer) { + let config = Arc::new(core_config); let info_hash = sample_info_hash(); let peer = sample_peer(); - (config, container, info_hash, peer) + let (container, swarms) = start(&config).await; + + (config, container, swarms, info_hash, peer) +} + +async fn start(core_config: &Arc) -> (Arc, Arc) { + let torrent_repository_container 
= Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let container = Arc::new(TrackerCoreContainer::initialize_from( + core_config, + &torrent_repository_container, + )); + + let mut jobs = vec![]; + + let job = torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( + torrent_repository_container.event_bus.receiver(), + &torrent_repository_container.stats_repository, + ); + + jobs.push(job); + + let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( + torrent_repository_container.event_bus.receiver(), + &container.db_torrent_repository, + ); + + jobs.push(job); + + // Give the event listeners some time to start + // todo: they should notify when they are ready + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + (container, torrent_repository_container.swarms.clone()) } async fn announce_peer_started(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) -> AnnounceData { peer.event = AnnounceEvent::Started; - container + let announce_data = container .announce_handler .announce(info_hash, peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) .await - .unwrap() + .unwrap(); + + // Give time to the event listeners to process the event + yield_now().await; + + announce_data } -async fn _announce_peer_completed(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) -> AnnounceData { +async fn announce_peer_completed(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) -> AnnounceData { peer.event = AnnounceEvent::Completed; - container + let announce_data = container .announce_handler .announce(info_hash, peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) .await - .unwrap() + .unwrap(); + + // Give time to the event listeners to process the event + yield_now().await; + + announce_data +} + +async fn increase_number_of_downloads(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) { + let _announce_data = 
announce_peer_started(container, peer, info_hash).await; + let announce_data = announce_peer_completed(container, peer, info_hash).await; + + assert_eq!(announce_data.stats.downloads(), 1); } #[tokio::test] async fn it_should_handle_the_announce_request() { - let (_config, container, info_hash, mut peer) = initialize(); + let (_config, container, _swarms, info_hash, mut peer) = initialize_test_env(ephemeral_configuration()).await; let announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; - assert_eq!(announce_data, AnnounceData::default()); + assert_eq!( + announce_data, + AnnounceData { + peers: vec![], + stats: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + policy: AnnouncePolicy { + interval: 120, + interval_min: 120 + } + } + ); } #[tokio::test] async fn it_should_not_return_the_peer_making_the_announce_request() { - let (_config, container, info_hash, mut peer) = initialize(); + let (_config, container, _swarms, info_hash, mut peer) = initialize_test_env(ephemeral_configuration()).await; let announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; @@ -110,7 +175,7 @@ async fn it_should_not_return_the_peer_making_the_announce_request() { #[tokio::test] async fn it_should_handle_the_scrape_request() { - let (_config, container, info_hash, mut peer) = initialize(); + let (_config, container, _swarms, info_hash, mut peer) = initialize_test_env(ephemeral_configuration()).await; let _announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; @@ -118,3 +183,25 @@ async fn it_should_handle_the_scrape_request() { assert!(scrape_data.files.contains_key(&info_hash)); } + +#[tokio::test] +async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { + let mut core_config = ephemeral_configuration(); + core_config.tracker_policy.persistent_torrent_completed_stat = true; + + let (_config, container, swarms, info_hash, mut peer) = 
initialize_test_env(core_config).await; + + increase_number_of_downloads(&container, &mut peer, &info_hash).await; + + assert!(swarms.get_swarm_metadata(&info_hash).await.unwrap().unwrap().downloads() == 1); + + swarms.remove(&info_hash).await.unwrap(); + + // Make sure the swarm metadata is removed + assert!(swarms.get_swarm_metadata(&info_hash).await.unwrap().is_none()); + + // Load torrents from the database to ensure the completed stats are persisted + container.torrents_manager.load_torrents_from_database().unwrap(); + + assert!(swarms.get_swarm_metadata(&info_hash).await.unwrap().unwrap().downloads() == 1); +} From 28603fe1d877ab26076ff3e9c10a246e26122fab Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 May 2025 13:38:26 +0100 Subject: [PATCH 078/247] refactor: [#1524] extract TestEnv for integration tests in tracker-core --- .../tracker-core/tests/common/fixtures.rs | 52 +++++ packages/tracker-core/tests/common/mod.rs | 2 + .../tracker-core/tests/common/test_env.rs | 137 +++++++++++++ packages/tracker-core/tests/integration.rs | 191 ++++-------------- 4 files changed, 227 insertions(+), 155 deletions(-) create mode 100644 packages/tracker-core/tests/common/fixtures.rs create mode 100644 packages/tracker-core/tests/common/mod.rs create mode 100644 packages/tracker-core/tests/common/test_env.rs diff --git a/packages/tracker-core/tests/common/fixtures.rs b/packages/tracker-core/tests/common/fixtures.rs new file mode 100644 index 000000000..ea9c93a65 --- /dev/null +++ b/packages/tracker-core/tests/common/fixtures.rs @@ -0,0 +1,52 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::str::FromStr; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_configuration::Core; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; + +/// # Panics 
+/// +/// Will panic if the temporary file path is not a valid UTF-8 string. +#[must_use] +pub fn ephemeral_configuration() -> Core { + let mut config = Core::default(); + + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + + config +} + +/// # Panics +/// +/// Will panic if the string representation of the info hash is not a valid infohash. +#[must_use] +pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") +} + +/// Sample peer whose state is not relevant for the tests. +#[must_use] +pub fn sample_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(remote_client_ip(), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } +} + +// The client peer IP. 
+#[must_use] +pub fn remote_client_ip() -> IpAddr { + IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) +} diff --git a/packages/tracker-core/tests/common/mod.rs b/packages/tracker-core/tests/common/mod.rs new file mode 100644 index 000000000..414e9d7b5 --- /dev/null +++ b/packages/tracker-core/tests/common/mod.rs @@ -0,0 +1,2 @@ +pub mod fixtures; +pub mod test_env; diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs new file mode 100644 index 000000000..8a443d8f0 --- /dev/null +++ b/packages/tracker-core/tests/common/test_env.rs @@ -0,0 +1,137 @@ +use std::net::IpAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::AnnounceEvent; +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::PeersWanted; +use bittorrent_tracker_core::container::TrackerCoreContainer; +use tokio::task::yield_now; +use torrust_tracker_configuration::Core; +use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; + +pub struct TestEnv { + pub torrent_repository_container: Arc, + pub tracker_core_container: Arc, +} + +impl TestEnv { + #[must_use] + pub async fn started(core_config: Core) -> Self { + let test_env = TestEnv::new(core_config); + test_env.start().await; + test_env + } + + #[must_use] + pub fn new(core_config: Core) -> Self { + let core_config = Arc::new(core_config); + + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); + + Self { + torrent_repository_container, + tracker_core_container, + } + } + + pub async fn start(&self) { + let mut jobs = vec![]; + + let job 
= torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( + self.torrent_repository_container.event_bus.receiver(), + &self.torrent_repository_container.stats_repository, + ); + + jobs.push(job); + + let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( + self.torrent_repository_container.event_bus.receiver(), + &self.tracker_core_container.db_torrent_repository, + ); + + jobs.push(job); + + // Give the event listeners some time to start + // todo: they should notify when they are ready + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + + pub async fn announce_peer_started( + &mut self, + mut peer: Peer, + remote_client_ip: &IpAddr, + info_hash: &InfoHash, + ) -> AnnounceData { + peer.event = AnnounceEvent::Started; + + let announce_data = self + .tracker_core_container + .announce_handler + .announce(info_hash, &mut peer, remote_client_ip, &PeersWanted::AsManyAsPossible) + .await + .unwrap(); + + // Give time to the event listeners to process the event + yield_now().await; + + announce_data + } + + pub async fn announce_peer_completed( + &mut self, + mut peer: Peer, + remote_client_ip: &IpAddr, + info_hash: &InfoHash, + ) -> AnnounceData { + peer.event = AnnounceEvent::Completed; + + let announce_data = self + .tracker_core_container + .announce_handler + .announce(info_hash, &mut peer, remote_client_ip, &PeersWanted::AsManyAsPossible) + .await + .unwrap(); + + // Give time to the event listeners to process the event + yield_now().await; + + announce_data + } + + pub async fn scrape(&self, info_hash: &InfoHash) -> ScrapeData { + self.tracker_core_container + .scrape_handler + .scrape(&vec![*info_hash]) + .await + .unwrap() + } + + pub async fn increase_number_of_downloads(&mut self, peer: Peer, remote_client_ip: &IpAddr, info_hash: &InfoHash) { + let _announce_data = self.announce_peer_started(peer, remote_client_ip, info_hash).await; + let announce_data = 
self.announce_peer_completed(peer, remote_client_ip, info_hash).await; + + assert_eq!(announce_data.stats.downloads(), 1); + } + + pub async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrent_repository_container + .swarms + .get_swarm_metadata(info_hash) + .await + .unwrap() + } + + pub async fn remove_swarm(&self, info_hash: &InfoHash) { + self.torrent_repository_container.swarms.remove(info_hash).await.unwrap(); + } +} diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs index 7af0ec4fa..d24acf67b 100644 --- a/packages/tracker-core/tests/integration.rs +++ b/packages/tracker-core/tests/integration.rs @@ -1,151 +1,18 @@ -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::str::FromStr; -use std::sync::Arc; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::announce_handler::PeersWanted; -use bittorrent_tracker_core::container::TrackerCoreContainer; -use tokio::task::yield_now; -use torrust_tracker_configuration::{AnnouncePolicy, Core}; +mod common; + +use common::fixtures::{ephemeral_configuration, remote_client_ip, sample_info_hash, sample_peer}; +use common::test_env::TestEnv; +use torrust_tracker_configuration::AnnouncePolicy; use torrust_tracker_primitives::core::AnnounceData; -use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::DurationSinceUnixEpoch; -use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; -use torrust_tracker_torrent_repository::Swarms; - -/// # Panics -/// -/// Will panic if the temporary file path is not a valid UTF-8 string. 
-#[must_use] -pub fn ephemeral_configuration() -> Core { - let mut config = Core::default(); - - let temp_file = ephemeral_sqlite_database(); - temp_file.to_str().unwrap().clone_into(&mut config.database.path); - - config -} - -/// # Panics -/// -/// Will panic if the string representation of the info hash is not a valid infohash. -#[must_use] -pub fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 - .parse::() - .expect("String should be a valid info hash") -} - -/// Sample peer whose state is not relevant for the tests. -#[must_use] -pub fn sample_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(remote_client_ip(), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } -} - -// The client peer IP. 
-#[must_use] -fn remote_client_ip() -> IpAddr { - IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) -} - -async fn initialize_test_env(core_config: Core) -> (Arc, Arc, Arc, InfoHash, Peer) { - let config = Arc::new(core_config); - - let info_hash = sample_info_hash(); - - let peer = sample_peer(); - - let (container, swarms) = start(&config).await; - - (config, container, swarms, info_hash, peer) -} - -async fn start(core_config: &Arc) -> (Arc, Arc) { - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( - core_config.tracker_usage_statistics.into(), - )); - - let container = Arc::new(TrackerCoreContainer::initialize_from( - core_config, - &torrent_repository_container, - )); - - let mut jobs = vec![]; - - let job = torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( - torrent_repository_container.event_bus.receiver(), - &torrent_repository_container.stats_repository, - ); - - jobs.push(job); - - let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( - torrent_repository_container.event_bus.receiver(), - &container.db_torrent_repository, - ); - - jobs.push(job); - - // Give the event listeners some time to start - // todo: they should notify when they are ready - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - - (container, torrent_repository_container.swarms.clone()) -} - -async fn announce_peer_started(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) -> AnnounceData { - peer.event = AnnounceEvent::Started; - - let announce_data = container - .announce_handler - .announce(info_hash, peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) - .await - .unwrap(); - - // Give time to the event listeners to process the event - yield_now().await; - - announce_data -} - -async fn announce_peer_completed(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) -> AnnounceData { - peer.event = AnnounceEvent::Completed; - - let announce_data = 
container - .announce_handler - .announce(info_hash, peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) - .await - .unwrap(); - - // Give time to the event listeners to process the event - yield_now().await; - - announce_data -} - -async fn increase_number_of_downloads(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) { - let _announce_data = announce_peer_started(container, peer, info_hash).await; - let announce_data = announce_peer_completed(container, peer, info_hash).await; - - assert_eq!(announce_data.stats.downloads(), 1); -} #[tokio::test] async fn it_should_handle_the_announce_request() { - let (_config, container, _swarms, info_hash, mut peer) = initialize_test_env(ephemeral_configuration()).await; + let mut test_env = TestEnv::started(ephemeral_configuration()).await; - let announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; + let announce_data = test_env + .announce_peer_started(sample_peer(), &remote_client_ip(), &sample_info_hash()) + .await; assert_eq!( announce_data, @@ -166,20 +33,26 @@ async fn it_should_handle_the_announce_request() { #[tokio::test] async fn it_should_not_return_the_peer_making_the_announce_request() { - let (_config, container, _swarms, info_hash, mut peer) = initialize_test_env(ephemeral_configuration()).await; + let mut test_env = TestEnv::started(ephemeral_configuration()).await; - let announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; + let announce_data = test_env + .announce_peer_started(sample_peer(), &remote_client_ip(), &sample_info_hash()) + .await; assert_eq!(announce_data.peers.len(), 0); } #[tokio::test] async fn it_should_handle_the_scrape_request() { - let (_config, container, _swarms, info_hash, mut peer) = initialize_test_env(ephemeral_configuration()).await; + let mut test_env = TestEnv::started(ephemeral_configuration()).await; - let _announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; + let info_hash = 
sample_info_hash(); + + let _announce_data = test_env + .announce_peer_started(sample_peer(), &remote_client_ip(), &info_hash) + .await; - let scrape_data = container.scrape_handler.scrape(&vec![info_hash]).await.unwrap(); + let scrape_data = test_env.scrape(&info_hash).await; assert!(scrape_data.files.contains_key(&info_hash)); } @@ -189,19 +62,27 @@ async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_t let mut core_config = ephemeral_configuration(); core_config.tracker_policy.persistent_torrent_completed_stat = true; - let (_config, container, swarms, info_hash, mut peer) = initialize_test_env(core_config).await; + let mut test_env = TestEnv::started(core_config).await; - increase_number_of_downloads(&container, &mut peer, &info_hash).await; + let info_hash = sample_info_hash(); - assert!(swarms.get_swarm_metadata(&info_hash).await.unwrap().unwrap().downloads() == 1); + test_env + .increase_number_of_downloads(sample_peer(), &remote_client_ip(), &info_hash) + .await; - swarms.remove(&info_hash).await.unwrap(); + assert!(test_env.get_swarm_metadata(&info_hash).await.unwrap().downloads() == 1); - // Make sure the swarm metadata is removed - assert!(swarms.get_swarm_metadata(&info_hash).await.unwrap().is_none()); + test_env.remove_swarm(&info_hash).await; + + // Ensure the swarm metadata is removed + assert!(test_env.get_swarm_metadata(&info_hash).await.is_none()); // Load torrents from the database to ensure the completed stats are persisted - container.torrents_manager.load_torrents_from_database().unwrap(); + test_env + .tracker_core_container + .torrents_manager + .load_torrents_from_database() + .unwrap(); - assert!(swarms.get_swarm_metadata(&info_hash).await.unwrap().unwrap().downloads() == 1); + assert!(test_env.get_swarm_metadata(&info_hash).await.unwrap().downloads() == 1); } From 8c3154953f80a221de63366bd10cc7111a71f126 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 May 2025 13:47:40 +0100 Subject: [PATCH 079/247] 
refactor: [#1524] rename methods --- .../src/environment.rs | 2 +- .../src/environment.rs | 2 +- .../src/services/announce.rs | 2 +- .../http-tracker-core/src/services/scrape.rs | 6 ++--- packages/tracker-core/src/announce_handler.rs | 24 +++++++++---------- packages/tracker-core/src/lib.rs | 8 +++---- packages/tracker-core/src/scrape_handler.rs | 6 ++--- packages/tracker-core/src/torrent/manager.rs | 4 ++-- .../src/torrent/repository/in_memory.rs | 2 +- packages/tracker-core/src/torrent/services.rs | 18 +++++++------- .../tracker-core/tests/common/test_env.rs | 6 ++--- .../udp-tracker-core/src/services/announce.rs | 2 +- .../udp-tracker-core/src/services/scrape.rs | 2 +- .../udp-tracker-server/src/environment.rs | 2 +- .../src/handlers/announce.rs | 4 ++-- .../udp-tracker-server/src/handlers/scrape.rs | 2 +- 16 files changed, 46 insertions(+), 46 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 10dada2db..59605d781 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -29,7 +29,7 @@ impl Environment { self.container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None) + .handle_announcement(info_hash, peer, None) .await } } diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index 92ca5a2d1..3c7ff564d 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -37,7 +37,7 @@ where self.container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None) + .handle_announcement(info_hash, peer, None) .await } } diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 9f39a04e4..0ad5ed143 100644 --- 
a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -87,7 +87,7 @@ impl AnnounceService { let announce_data = self .announce_handler - .announce( + .handle_announcement( &announce_request.info_hash, &mut peer, &remote_client_addr.ip(), diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 3da1aa88f..f22f2f632 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -78,7 +78,7 @@ impl ScrapeService { let scrape_data = if self.authentication_is_required() && !self.is_authenticated(maybe_key).await { ScrapeData::zeroed(&scrape_request.info_hashes) } else { - self.scrape_handler.scrape(&scrape_request.info_hashes).await? + self.scrape_handler.handle_scrape(&scrape_request.info_hashes).await? }; let remote_client_addr = resolve_remote_client_addr(&self.core_config.net.on_reverse_proxy.into(), client_ip_sources)?; @@ -291,7 +291,7 @@ mod tests { let original_peer_ip = peer.ip(); container .announce_handler - .announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) + .handle_announcement(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -482,7 +482,7 @@ mod tests { let original_peer_ip = peer.ip(); container .announce_handler - .announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) + .handle_announcement(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) .await .unwrap(); diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 0a3fef045..7d37ec9ed 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -154,7 +154,7 @@ impl AnnounceHandler { /// /// Returns an error if the tracker is running in `listed` mode and the /// torrent is not 
whitelisted. - pub async fn announce( + pub async fn handle_announcement( &self, info_hash: &InfoHash, peer: &mut peer::Peer, @@ -178,7 +178,7 @@ impl AnnounceHandler { let _number_of_downloads_increased = self .in_memory_torrent_repository - .upsert_peer(info_hash, peer, opt_persistent_torrent) + .handle_announcement(info_hash, peer, opt_persistent_torrent) .await; Ok(self.build_announce_data(info_hash, peer, peers_wanted).await) @@ -456,7 +456,7 @@ mod tests { let mut peer = sample_peer(); let announce_data = announce_handler - .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -469,7 +469,7 @@ mod tests { let mut previously_announced_peer = sample_peer_1(); announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut previously_announced_peer, &peer_ip(), @@ -480,7 +480,7 @@ mod tests { let mut peer = sample_peer_2(); let announce_data = announce_handler - .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -493,7 +493,7 @@ mod tests { let mut previously_announced_peer_1 = sample_peer_1(); announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut previously_announced_peer_1, &peer_ip(), @@ -504,7 +504,7 @@ mod tests { let mut previously_announced_peer_2 = sample_peer_2(); announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut previously_announced_peer_2, &peer_ip(), @@ -515,7 +515,7 @@ mod tests { let mut peer = sample_peer_3(); let announce_data = announce_handler - .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::only(1)) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::only(1)) .await .unwrap(); @@ -540,7 +540,7 @@ mod tests { let mut peer = 
seeder(); let announce_data = announce_handler - .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -554,7 +554,7 @@ mod tests { let mut peer = leecher(); let announce_data = announce_handler - .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -568,7 +568,7 @@ mod tests { // We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut started_peer, &peer_ip(), @@ -579,7 +579,7 @@ mod tests { let mut completed_peer = completed_peer(); let announce_data = announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut completed_peer, &peer_ip(), diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index dacf41383..5167abf51 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -203,7 +203,7 @@ mod tests { // Announce a "complete" peer for the torrent let mut complete_peer = complete_peer(); announce_handler - .announce( + .handle_announcement( &info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10)), @@ -215,7 +215,7 @@ mod tests { // Announce an "incomplete" peer for the torrent let mut incomplete_peer = incomplete_peer(); announce_handler - .announce( + .handle_announcement( &info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11)), @@ -225,7 +225,7 @@ mod tests { .unwrap(); // Scrape - let scrape_data = scrape_handler.scrape(&vec![info_hash]).await.unwrap(); + let scrape_data = scrape_handler.handle_scrape(&vec![info_hash]).await.unwrap(); // The expected swarm metadata for the torrent let 
mut expected_scrape_data = ScrapeData::empty(); @@ -259,7 +259,7 @@ mod tests { let non_whitelisted_info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // DevSkim: ignore DS173237 - let scrape_data = scrape_handler.scrape(&vec![non_whitelisted_info_hash]).await.unwrap(); + let scrape_data = scrape_handler.handle_scrape(&vec![non_whitelisted_info_hash]).await.unwrap(); // The expected zeroed swarm metadata for the file let mut expected_scrape_data = ScrapeData::empty(); diff --git a/packages/tracker-core/src/scrape_handler.rs b/packages/tracker-core/src/scrape_handler.rs index 443d989a6..9c94a4e50 100644 --- a/packages/tracker-core/src/scrape_handler.rs +++ b/packages/tracker-core/src/scrape_handler.rs @@ -107,7 +107,7 @@ impl ScrapeHandler { /// # BEP Reference: /// /// [BEP 48: Scrape Protocol](https://www.bittorrent.org/beps/bep_0048.html) - pub async fn scrape(&self, info_hashes: &Vec) -> Result { + pub async fn handle_scrape(&self, info_hashes: &Vec) -> Result { let mut scrape_data = ScrapeData::empty(); for info_hash in info_hashes { @@ -158,7 +158,7 @@ mod tests { let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; // DevSkim: ignore DS173237 - let scrape_data = scrape_handler.scrape(&info_hashes).await.unwrap(); + let scrape_data = scrape_handler.handle_scrape(&info_hashes).await.unwrap(); let mut expected_scrape_data = ScrapeData::empty(); @@ -176,7 +176,7 @@ mod tests { "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), // DevSkim: ignore DS173237 ]; - let scrape_data = scrape_handler.scrape(&info_hashes).await.unwrap(); + let scrape_data = scrape_handler.handle_scrape(&info_hashes).await.unwrap(); let mut expected_scrape_data = ScrapeData::empty(); expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index f463eee98..171d554a8 100644 --- 
a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -241,7 +241,7 @@ mod tests { peer.updated = DurationSinceUnixEpoch::new(0, 0); let _number_of_downloads_increased = services .in_memory_torrent_repository - .upsert_peer(&infohash, &peer, None) + .handle_announcement(&infohash, &peer, None) .await; // Simulate the time has passed 1 second more than the max peer timeout. @@ -259,7 +259,7 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(infohash, &peer, None).await; + let _number_of_downloads_increased = in_memory_torrent_repository.handle_announcement(infohash, &peer, None).await; // Remove the peer. The torrent is now peerless. in_memory_torrent_repository diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index bf8d083f8..bf63ef8d4 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -49,7 +49,7 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. 
#[must_use] - pub async fn upsert_peer( + pub async fn handle_announcement( &self, info_hash: &InfoHash, peer: &peer::Peer, diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 97694a80f..16db7b635 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -252,7 +252,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash, &sample_peer(), None) + .handle_announcement(&info_hash, &sample_peer(), None) .await; let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).await.unwrap(); @@ -298,7 +298,7 @@ mod tests { let info_hash = InfoHash::from_str(&hash).unwrap(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash, &sample_peer(), None) + .handle_announcement(&info_hash, &sample_peer(), None) .await; let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; @@ -325,10 +325,10 @@ mod tests { let info_hash2 = InfoHash::from_str(&hash2).unwrap(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash1, &sample_peer(), None) + .handle_announcement(&info_hash1, &sample_peer(), None) .await; let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash2, &sample_peer(), None) + .handle_announcement(&info_hash2, &sample_peer(), None) .await; let offset = 0; @@ -350,10 +350,10 @@ mod tests { let info_hash2 = InfoHash::from_str(&hash2).unwrap(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash1, &sample_peer(), None) + .handle_announcement(&info_hash1, &sample_peer(), None) .await; let _number_of_downloads_increased = in_memory_torrent_repository - 
.upsert_peer(&info_hash2, &sample_peer(), None) + .handle_announcement(&info_hash2, &sample_peer(), None) .await; let offset = 1; @@ -380,13 +380,13 @@ mod tests { let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash1, &sample_peer(), None) + .handle_announcement(&info_hash1, &sample_peer(), None) .await; let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash2, &sample_peer(), None) + .handle_announcement(&info_hash2, &sample_peer(), None) .await; let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; @@ -436,7 +436,7 @@ mod tests { let info_hash = sample_info_hash(); let _ = in_memory_torrent_repository - .upsert_peer(&info_hash, &sample_peer(), None) + .handle_announcement(&info_hash, &sample_peer(), None) .await; let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]).await; diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 8a443d8f0..d4462e3f6 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -77,7 +77,7 @@ impl TestEnv { let announce_data = self .tracker_core_container .announce_handler - .announce(info_hash, &mut peer, remote_client_ip, &PeersWanted::AsManyAsPossible) + .handle_announcement(info_hash, &mut peer, remote_client_ip, &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -98,7 +98,7 @@ impl TestEnv { let announce_data = self .tracker_core_container .announce_handler - .announce(info_hash, &mut peer, remote_client_ip, &PeersWanted::AsManyAsPossible) + .handle_announcement(info_hash, &mut 
peer, remote_client_ip, &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -111,7 +111,7 @@ impl TestEnv { pub async fn scrape(&self, info_hash: &InfoHash) -> ScrapeData { self.tracker_core_container .scrape_handler - .scrape(&vec![*info_hash]) + .handle_scrape(&vec![*info_hash]) .await .unwrap() } diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index 6ea237d84..a69e91d8a 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -78,7 +78,7 @@ impl AnnounceService { let announce_data = self .announce_handler - .announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted) + .handle_announcement(&info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; self.send_event(info_hash, peer, client_socket_addr, server_service_binding) diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index b42004f63..8551351fb 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -56,7 +56,7 @@ impl ScrapeService { let scrape_data = self .scrape_handler - .scrape(&Self::convert_from_aquatic(&request.info_hashes)) + .handle_scrape(&Self::convert_from_aquatic(&request.info_hashes)) .await?; self.send_event(client_socket_addr, server_service_binding).await; diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index f92d5dd29..c4e0ce96f 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -39,7 +39,7 @@ where .container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None) + .handle_announcement(info_hash, peer, None) .await; } } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 
567f43740..edc36ebc8 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -370,7 +370,7 @@ mod tests { .into(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash.0.into(), &peer_using_ipv6, None) + .handle_announcement(&info_hash.0.into(), &peer_using_ipv6, None) .await; } @@ -714,7 +714,7 @@ mod tests { .into(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash.0.into(), &peer_using_ipv4, None) + .handle_announcement(&info_hash.0.into(), &peer_using_ipv4, None) .await; } diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index a9462e0f9..183d78b70 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -166,7 +166,7 @@ mod tests { .into(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash.0.into(), &peer, None) + .handle_announcement(&info_hash.0.into(), &peer, None) .await; } From 67d177b6d4af24608d6be5a80ed10434242c4cd4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 May 2025 15:47:18 +0100 Subject: [PATCH 080/247] refactor: [#1524] command/query separation The returned value is not needed anymore. Secondary action (increase metrics) is done in the event listeners. 
--- .../src/environment.rs | 4 +- .../src/environment.rs | 4 +- packages/torrent-repository/src/swarm.rs | 157 +++++++----------- packages/torrent-repository/src/swarms.rs | 6 +- packages/tracker-core/src/announce_handler.rs | 3 +- packages/tracker-core/src/torrent/manager.rs | 4 +- .../src/torrent/repository/in_memory.rs | 5 +- packages/tracker-core/src/torrent/services.rs | 18 +- .../udp-tracker-server/src/environment.rs | 3 +- .../src/handlers/announce.rs | 4 +- .../udp-tracker-server/src/handlers/scrape.rs | 2 +- 11 files changed, 82 insertions(+), 128 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 59605d781..0c1431db5 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -25,12 +25,12 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { self.container .tracker_core_container .in_memory_torrent_repository .handle_announcement(info_hash, peer, None) - .await + .await; } } diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index 3c7ff564d..be93a8723 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -33,12 +33,12 @@ where S: std::fmt::Debug + std::fmt::Display, { /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { self.container .tracker_core_container .in_memory_torrent_repository .handle_announcement(info_hash, peer, None) - .await + .await; } } diff --git 
a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index b9076289b..84e1f2da4 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -33,17 +33,13 @@ impl Swarm { } } - pub async fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) -> bool { - let mut downloads_increased: bool = false; - + pub async fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) { let _previous_peer = match peer::ReadInfo::get_event(incoming_announce) { AnnounceEvent::Started | AnnounceEvent::None | AnnounceEvent::Completed => { - self.upsert_peer(Arc::new(*incoming_announce), &mut downloads_increased).await + self.upsert_peer(Arc::new(*incoming_announce)).await } AnnounceEvent::Stopped => self.remove_peer(&incoming_announce.peer_addr).await, }; - - downloads_increased } pub async fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) -> usize { @@ -159,26 +155,20 @@ impl Swarm { !self.should_be_removed(policy) } - async fn upsert_peer( - &mut self, - incoming_announce: Arc, - downloads_increased: &mut bool, - ) -> Option> { + async fn upsert_peer(&mut self, incoming_announce: Arc) -> Option> { let announcement = incoming_announce.clone(); if let Some(previous_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { - *downloads_increased = self.update_metadata_on_update(&previous_announce, &announcement); + let downloads_increased = self.update_metadata_on_update(&previous_announce, &announcement); self.trigger_peer_updated_event(&previous_announce, &announcement).await; - if *downloads_increased { + if downloads_increased { self.trigger_peer_download_completed_event(&announcement).await; } Some(previous_announce) } else { - *downloads_increased = false; - self.update_metadata_on_insert(&announcement); self.trigger_peer_added_event(&announcement).await; @@ -362,36 +352,30 @@ mod tests { #[tokio::test] async fn 
it_should_allow_inserting_a_new_peer() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased).await, None); + assert_eq!(swarm.upsert_peer(peer.into()).await, None); } #[tokio::test] async fn it_should_allow_updating_a_preexisting_peer() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; - assert_eq!( - swarm.upsert_peer(peer.into(), &mut downloads_increased).await, - Some(Arc::new(peer)) - ); + assert_eq!(swarm.upsert_peer(peer.into()).await, Some(Arc::new(peer))); } #[tokio::test] async fn it_should_allow_getting_all_peers() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; assert_eq!(swarm.peers(None), [Arc::new(peer)]); } @@ -399,11 +383,10 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_one_peer_by_id() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); } @@ -411,11 +394,10 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; 
assert_eq!(swarm.len(), 1); } @@ -423,11 +405,10 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_peers_after_removing_one() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; swarm.remove_peer(&peer.peer_addr).await; @@ -437,11 +418,10 @@ mod tests { #[tokio::test] async fn it_should_allow_removing_an_existing_peer() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; let old = swarm.remove_peer(&peer.peer_addr).await; @@ -461,19 +441,18 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer1.into()).await; let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer2.into()).await; assert_eq!(swarm.peers_excluding(&peer2.peer_addr, None), [Arc::new(peer1)]); } @@ -481,13 +460,13 @@ mod tests { #[tokio::test] async fn it_should_count_inactive_peers() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; + let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time 
= DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; let inactive_peers_total = swarm.count_inactive_peers(last_update_time + one_second); @@ -497,13 +476,13 @@ mod tests { #[tokio::test] async fn it_should_remove_inactive_peers() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; + let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; // Remove peers not updated since one second after inserting the peer swarm.remove_inactive(last_update_time + one_second).await; @@ -514,13 +493,13 @@ mod tests { #[tokio::test] async fn it_should_not_remove_active_peers() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; + let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; // Remove peers not updated since one second before inserting the peer. 
swarm.remove_inactive(last_update_time - one_second).await; @@ -542,7 +521,7 @@ mod tests { async fn not_empty_swarm() -> Swarm { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - swarm.upsert_peer(PeerBuilder::default().build().into(), &mut false).await; + swarm.upsert_peer(PeerBuilder::default().build().into()).await; swarm } @@ -550,13 +529,12 @@ mod tests { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::leecher().build(); - let mut downloads_increased = false; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; assert!(swarm.metadata().downloads() > 0); @@ -631,17 +609,16 @@ mod tests { #[tokio::test] async fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer1 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer1.into()).await; let peer2 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer2.into()).await; assert_eq!(swarm.len(), 2); } @@ -649,7 +626,6 @@ mod tests { #[tokio::test] async fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; // When that happens the peer ID will be changed in the swarm. // In practice, it's like if the peer had changed its ID. 
@@ -658,13 +634,13 @@ mod tests { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer1.into()).await; let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer2.into()).await; assert_eq!(swarm.len(), 1); } @@ -672,13 +648,12 @@ mod tests { #[tokio::test] async fn it_should_return_the_swarm_metadata() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; - swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; + swarm.upsert_peer(seeder.into()).await; + swarm.upsert_peer(leecher.into()).await; assert_eq!( swarm.metadata(), @@ -693,13 +668,12 @@ mod tests { #[tokio::test] async fn it_should_return_the_number_of_seeders_in_the_list() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; - swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; + swarm.upsert_peer(seeder.into()).await; + swarm.upsert_peer(leecher.into()).await; let (seeders, _leechers) = swarm.seeders_and_leechers(); @@ -709,13 +683,12 @@ mod tests { #[tokio::test] async fn it_should_return_the_number_of_leechers_in_the_list() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = 
PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; - swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; + swarm.upsert_peer(seeder.into()).await; + swarm.upsert_peer(leecher.into()).await; let (_seeders, leechers) = swarm.seeders_and_leechers(); @@ -739,13 +712,12 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let leechers = swarm.metadata().leechers(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into()).await; assert_eq!(swarm.metadata().leechers(), leechers + 1); } @@ -753,13 +725,12 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let seeders = swarm.metadata().seeders(); let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(seeder.into()).await; assert_eq!(swarm.metadata().seeders(), seeders + 1); } @@ -768,13 +739,12 @@ mod tests { async fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( ) { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let downloads = swarm.metadata().downloads(); let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(seeder.into()).await; assert_eq!(swarm.metadata().downloads(), downloads); } @@ -789,11 +759,10 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - 
let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into()).await; let leechers = swarm.metadata().leechers(); @@ -805,11 +774,10 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(seeder.into()).await; let seeders = swarm.metadata().seeders(); @@ -830,11 +798,10 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into()).await; let leechers = swarm.metadata().leechers(); @@ -846,11 +813,10 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(seeder.into()).await; let seeders = swarm.metadata().seeders(); @@ -870,18 +836,17 @@ mod tests { #[tokio::test] async fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; let leechers = swarm.metadata().leechers(); let seeders 
= swarm.metadata().seeders(); peer.left = NumberOfBytes::new(0); // Convert to seeder - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; assert_eq!(swarm.metadata().seeders(), seeders + 1); assert_eq!(swarm.metadata().leechers(), leechers - 1); @@ -890,18 +855,17 @@ mod tests { #[tokio::test] async fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let mut peer = PeerBuilder::seeder().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(10); // Convert to leecher - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; assert_eq!(swarm.metadata().leechers(), leechers + 1); assert_eq!(swarm.metadata().seeders(), seeders - 1); @@ -910,17 +874,16 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; assert_eq!(swarm.metadata().downloads(), downloads + 1); } @@ -928,19 +891,18 @@ mod tests { #[tokio::test] async fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; 
let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; assert_eq!(swarm.metadata().downloads(), downloads + 1); } @@ -971,8 +933,7 @@ mod tests { let mut swarm = Swarm::new(&sample_info_hash(), 0, Some(Arc::new(event_sender_mock))); - let mut downloads_increased = false; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; } #[tokio::test] @@ -990,8 +951,7 @@ mod tests { let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer - let mut downloads_increased = false; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; swarm.remove_peer(&peer.peer_addr).await; } @@ -1011,8 +971,7 @@ mod tests { let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer - let mut downloads_increased = false; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; // Peers not updated after this time will be removed let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); @@ -1042,11 +1001,10 @@ mod tests { let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer - let mut downloads_increased = false; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; // Update the peer - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; } #[tokio::test] @@ -1079,11 +1037,10 @@ mod tests { let mut 
swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer - let mut downloads_increased = false; - swarm.upsert_peer(started_peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(started_peer.into()).await; // Announce as completed - swarm.upsert_peer(completed_peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(completed_peer.into()).await; } } } diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 36f83070d..1504ac1f4 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -54,7 +54,7 @@ impl Swarms { info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option, - ) -> Result { + ) -> Result<(), Error> { let swarm_handle = match self.swarms.get(info_hash) { None => { let number_of_downloads = opt_persistent_torrent.unwrap_or_default(); @@ -80,9 +80,9 @@ impl Swarms { let mut swarm = swarm_handle.value().lock().await; - let downloads_increased = swarm.handle_announcement(peer).await; + swarm.handle_announcement(peer).await; - Ok(downloads_increased) + Ok(()) } /// Inserts a new swarm. Only used for testing purposes. 
diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 7d37ec9ed..ffd244f2a 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -176,8 +176,7 @@ impl AnnounceHandler { peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); - let _number_of_downloads_increased = self - .in_memory_torrent_repository + self.in_memory_torrent_repository .handle_announcement(info_hash, peer, opt_persistent_torrent) .await; diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 171d554a8..d9997c4ad 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -239,7 +239,7 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = services + services .in_memory_torrent_repository .handle_announcement(&infohash, &peer, None) .await; @@ -259,7 +259,7 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.handle_announcement(infohash, &peer, None).await; + in_memory_torrent_repository.handle_announcement(infohash, &peer, None).await; // Remove the peer. The torrent is now peerless. in_memory_torrent_repository diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index bf63ef8d4..5c8a335b6 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -48,17 +48,16 @@ impl InMemoryTorrentRepository { /// # Panics /// /// This function panics if the underling swarms return an error. 
- #[must_use] pub async fn handle_announcement( &self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option, - ) -> bool { + ) { self.swarms .handle_announcement(info_hash, peer, opt_persistent_torrent) .await - .expect("Failed to upsert the peer in swarms") + .expect("Failed to upsert the peer in swarms"); } /// Removes inactive peers from all torrent entries. diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 16db7b635..2ae51fc78 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -251,7 +251,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash, &sample_peer(), None) .await; @@ -297,7 +297,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash, &sample_peer(), None) .await; @@ -324,10 +324,10 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash1, &sample_peer(), None) .await; - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash2, &sample_peer(), None) .await; @@ -349,10 +349,10 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); 
- let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash1, &sample_peer(), None) .await; - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash2, &sample_peer(), None) .await; @@ -379,13 +379,13 @@ mod tests { let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash1, &sample_peer(), None) .await; let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash2, &sample_peer(), None) .await; @@ -435,7 +435,7 @@ mod tests { let info_hash = sample_info_hash(); - let _ = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash, &sample_peer(), None) .await; diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index c4e0ce96f..94a166e4e 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -35,8 +35,7 @@ where /// Add a torrent to the tracker #[allow(dead_code)] pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _number_of_downloads_increased = self - .container + self.container .tracker_core_container .in_memory_torrent_repository .handle_announcement(info_hash, peer, None) diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index edc36ebc8..e2ca6821e 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ 
b/packages/udp-tracker-server/src/handlers/announce.rs @@ -369,7 +369,7 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash.0.into(), &peer_using_ipv6, None) .await; } @@ -713,7 +713,7 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash.0.into(), &peer_using_ipv4, None) .await; } diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 183d78b70..8bac05c1e 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -165,7 +165,7 @@ mod tests { .with_bytes_left_to_download(0) .into(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash.0.into(), &peer, None) .await; } From 21752709a3703f9e791510ccea97bbcbd495bb1d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 May 2025 18:39:20 +0100 Subject: [PATCH 081/247] feat: [#1535] scaffolding for tracker-core metrics New metric added: ``` tracker_core_persistent_torrents_downloads_total{} 1 ``` However, it's not persisted yet. TODO: - Persist into the database when updated. - Load from database when the tracker starts. 
--- Cargo.lock | 1 + .../src/v1/context/stats/handlers.rs | 2 + .../src/v1/context/stats/routes.rs | 2 + .../src/statistics/services.rs | 5 + .../src/http/client/requests/announce.rs | 8 +- packages/tracker-core/Cargo.toml | 1 + packages/tracker-core/src/container.rs | 6 +- .../src/statistics/event/handler.rs | 21 ++- .../src/statistics/event/listener.rs | 13 +- .../tracker-core/src/statistics/metrics.rs | 63 +++++++++ packages/tracker-core/src/statistics/mod.rs | 26 ++++ .../tracker-core/src/statistics/repository.rs | 132 ++++++++++++++++++ .../tracker-core/tests/common/test_env.rs | 1 + .../config/tracker.development.sqlite3.toml | 4 +- src/bootstrap/jobs/tracker_core.rs | 5 +- 15 files changed, 276 insertions(+), 14 deletions(-) create mode 100644 packages/tracker-core/src/statistics/metrics.rs create mode 100644 packages/tracker-core/src/statistics/repository.rs diff --git a/Cargo.lock b/Cargo.lock index 5415149e8..96de11cb2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -678,6 +678,7 @@ dependencies = [ "torrust-tracker-configuration", "torrust-tracker-events", "torrust-tracker-located-error", + "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-tracker-torrent-repository", diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 552958d74..3a353f1fc 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -70,6 +70,7 @@ pub async fn get_metrics_handler( Arc, Arc>, Arc, + Arc, Arc, Arc, Arc, @@ -83,6 +84,7 @@ pub async fn get_metrics_handler( state.3.clone(), state.4.clone(), state.5.clone(), + state.6.clone(), ) .await; diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index 3eeaa8bf4..f6c661130 100644 --- 
a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -28,7 +28,9 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc, ban_service: Arc>, swarms_stats_repository: Arc, + tracker_core_stats_repository: Arc, http_stats_repository: Arc, udp_stats_repository: Arc, udp_server_stats_repository: Arc, @@ -102,6 +103,7 @@ pub async fn get_labeled_metrics( let _udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let swarms_stats = swarms_stats_repository.get_metrics().await; + let tracker_core_stats = tracker_core_stats_repository.get_metrics().await; let http_stats = http_stats_repository.get_stats().await; let udp_stats_repository = udp_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; @@ -112,6 +114,9 @@ pub async fn get_labeled_metrics( metrics .merge(&swarms_stats.metric_collection) .expect("msg: failed to merge torrent repository metrics"); + metrics + .merge(&tracker_core_stats.metric_collection) + .expect("msg: failed to merge tracker core metrics"); metrics .merge(&http_stats.metric_collection) .expect("msg: failed to merge HTTP core metrics"); diff --git a/packages/tracker-client/src/http/client/requests/announce.rs b/packages/tracker-client/src/http/client/requests/announce.rs index 7d20fbba8..29b5d1221 100644 --- a/packages/tracker-client/src/http/client/requests/announce.rs +++ b/packages/tracker-client/src/http/client/requests/announce.rs @@ -53,16 +53,16 @@ pub type BaseTenASCII = u64; pub type PortNumber = u16; pub enum Event { - //Started, - //Stopped, + Started, + Stopped, Completed, } impl fmt::Display for Event { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - //Event::Started => write!(f, "started"), - //Event::Stopped => write!(f, "stopped"), + Event::Started => write!(f, "started"), + Event::Stopped => write!(f, "stopped"), Event::Completed 
=> write!(f, "completed"), } } diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index 3c89505b2..a2d08dfa0 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -31,6 +31,7 @@ torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } tracing = "0" diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index f4fb272de..ed56fb106 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -14,11 +14,11 @@ use crate::scrape_handler::ScrapeHandler; use crate::torrent::manager::TorrentsManager; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use crate::whitelist; use crate::whitelist::authorization::WhitelistAuthorization; use crate::whitelist::manager::WhitelistManager; use crate::whitelist::repository::in_memory::InMemoryWhitelist; use crate::whitelist::setup::initialize_whitelist_manager; +use crate::{statistics, whitelist}; pub struct TrackerCoreContainer { pub core_config: Arc, @@ -33,6 +33,7 @@ pub struct TrackerCoreContainer { pub in_memory_torrent_repository: Arc, pub db_torrent_repository: Arc, pub torrents_manager: Arc, + pub stats_repository: Arc, } impl TrackerCoreContainer { @@ -58,6 +59,8 @@ impl TrackerCoreContainer { &db_torrent_repository, )); + let stats_repository = 
Arc::new(statistics::repository::Repository::new()); + let announce_handler = Arc::new(AnnounceHandler::new( core_config, &whitelist_authorization, @@ -80,6 +83,7 @@ impl TrackerCoreContainer { in_memory_torrent_repository, db_torrent_repository, torrents_manager, + stats_repository, } } } diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index 7b6ce83b7..ac6d0639e 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -1,14 +1,19 @@ use std::sync::Arc; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_torrent_repository::event::Event; +use crate::statistics::repository::Repository; +use crate::statistics::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; pub async fn handle_event( event: Event, + stats_repository: &Arc, db_torrent_repository: &Arc, - _now: DurationSinceUnixEpoch, + now: DurationSinceUnixEpoch, ) { match event { // Torrent events @@ -36,6 +41,7 @@ pub async fn handle_event( Event::PeerDownloadCompleted { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); + // Increment the number of downloads for the torrent match db_torrent_repository.increase_number_of_downloads(&info_hash) { Ok(()) => { tracing::debug!(info_hash = ?info_hash, "Number of downloads increased"); @@ -44,6 +50,19 @@ pub async fn handle_event( tracing::error!(info_hash = ?info_hash, error = ?err, "Failed to increase number of downloads"); } } + + // Increment the number of downloads for all the torrents + let _unused = stats_repository + .increment_counter( + &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + &LabelSet::default(), + now, + ) + .await; + + // todo: + // - 
Persist the metric into the database. + // - Load the metric from the database. } } } diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index e04675092..f85b2b7a0 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -6,26 +6,33 @@ use torrust_tracker_events::receiver::RecvError; use torrust_tracker_torrent_repository::event::receiver::Receiver; use super::handler::handle_event; +use crate::statistics::repository::Repository; use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; #[must_use] pub fn run_event_listener( receiver: Receiver, + repository: &Arc, db_torrent_repository: &Arc, ) -> JoinHandle<()> { + let stats_repository = repository.clone(); let db_torrent_repository: Arc = db_torrent_repository.clone(); tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting torrent repository event listener"); tokio::spawn(async move { - dispatch_events(receiver, db_torrent_repository).await; + dispatch_events(receiver, stats_repository, db_torrent_repository).await; tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver, db_torrent_repository: Arc) { +async fn dispatch_events( + mut receiver: Receiver, + stats_repository: Arc, + db_torrent_repository: Arc, +) { let shutdown_signal = tokio::signal::ctrl_c(); tokio::pin!(shutdown_signal); @@ -41,7 +48,7 @@ async fn dispatch_events(mut receiver: Receiver, db_torrent_repository: Arc { match result { - Ok(event) => handle_event(event, &db_torrent_repository, CurrentClock::now()).await, + Ok(event) => handle_event(event, &stats_repository, &db_torrent_repository, CurrentClock::now()).await, Err(e) => { match e { RecvError::Closed => { diff --git a/packages/tracker-core/src/statistics/metrics.rs 
b/packages/tracker-core/src/statistics/metrics.rs
new file mode 100644
index 000000000..f8ab3f9d9
--- /dev/null
+++ b/packages/tracker-core/src/statistics/metrics.rs
@@ -0,0 +1,63 @@
+use serde::Serialize;
+use torrust_tracker_metrics::label::LabelSet;
+use torrust_tracker_metrics::metric::MetricName;
+use torrust_tracker_metrics::metric_collection::{Error, MetricCollection};
+use torrust_tracker_primitives::DurationSinceUnixEpoch;
+
+/// Metrics collected by the tracker core.
+#[derive(Debug, Clone, PartialEq, Default, Serialize)]
+pub struct Metrics {
+    /// A collection of metrics.
+    pub metric_collection: MetricCollection,
+}
+
+impl Metrics {
+    /// # Errors
+    ///
+    /// Returns an error if the metric does not exist and it cannot be created.
+    pub fn increment_counter(
+        &mut self,
+        metric_name: &MetricName,
+        labels: &LabelSet,
+        now: DurationSinceUnixEpoch,
+    ) -> Result<(), Error> {
+        self.metric_collection.increase_counter(metric_name, labels, now)
+    }
+
+    /// # Errors
+    ///
+    /// Returns an error if the metric does not exist and it cannot be created.
+    pub fn set_gauge(
+        &mut self,
+        metric_name: &MetricName,
+        labels: &LabelSet,
+        value: f64,
+        now: DurationSinceUnixEpoch,
+    ) -> Result<(), Error> {
+        self.metric_collection.set_gauge(metric_name, labels, value, now)
+    }
+
+    /// # Errors
+    ///
+    /// Returns an error if the metric does not exist and it cannot be created.
+    pub fn increment_gauge(
+        &mut self,
+        metric_name: &MetricName,
+        labels: &LabelSet,
+        now: DurationSinceUnixEpoch,
+    ) -> Result<(), Error> {
+        self.metric_collection.increment_gauge(metric_name, labels, now)
+    }
+
+    /// # Errors
+    ///
+    /// Returns an error if the metric does not exist and it cannot be created.
+    pub fn decrement_gauge(
+        &mut self,
+        metric_name: &MetricName,
+        labels: &LabelSet,
+        now: DurationSinceUnixEpoch,
+    ) -> Result<(), Error> {
+        self.metric_collection.decrement_gauge(metric_name, labels, now)
+    }
+}
diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs
index 53f112654..1cd9aac6b 100644
--- a/packages/tracker-core/src/statistics/mod.rs
+++ b/packages/tracker-core/src/statistics/mod.rs
@@ -1 +1,27 @@
 pub mod event;
+pub mod metrics;
+pub mod repository;
+
+use metrics::Metrics;
+use torrust_tracker_metrics::metric::description::MetricDescription;
+use torrust_tracker_metrics::metric_name;
+use torrust_tracker_metrics::unit::Unit;
+
+// Torrent metrics
+
+const TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = "tracker_core_persistent_torrents_downloads_total";
+
+#[must_use]
+pub fn describe_metrics() -> Metrics {
+    let mut metrics = Metrics::default();
+
+    // Torrent metrics
+
+    metrics.metric_collection.describe_counter(
+        &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL),
+        Some(Unit::Count),
+        Some(&MetricDescription::new("The total number of torrent downloads (persisted).")),
+    );
+
+    metrics
+}
diff --git a/packages/tracker-core/src/statistics/repository.rs b/packages/tracker-core/src/statistics/repository.rs
new file mode 100644
index 000000000..fe1292d00
--- /dev/null
+++ b/packages/tracker-core/src/statistics/repository.rs
@@ -0,0 +1,132 @@
+use std::sync::Arc;
+
+use tokio::sync::{RwLock, RwLockReadGuard};
+use torrust_tracker_metrics::label::LabelSet;
+use torrust_tracker_metrics::metric::MetricName;
+use torrust_tracker_metrics::metric_collection::Error;
+use torrust_tracker_primitives::DurationSinceUnixEpoch;
+
+use super::describe_metrics;
+use super::metrics::Metrics;
+
+/// A repository for the tracker core metrics.
+#[derive(Clone)] +pub struct Repository { + pub stats: Arc>, +} + +impl Default for Repository { + fn default() -> Self { + Self::new() + } +} + +impl Repository { + #[must_use] + pub fn new() -> Self { + let stats = Arc::new(RwLock::new(describe_metrics())); + + Self { stats } + } + + pub async fn get_metrics(&self) -> RwLockReadGuard<'_, Metrics> { + self.stats.read().await + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increment the counter. + pub async fn increment_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.increment_counter(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to increment the counter: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// set the gauge. + pub async fn set_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.set_gauge(metric_name, labels, value, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to set the gauge: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increment the gauge. 
+ pub async fn increment_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.increment_gauge(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to increment the gauge: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// decrement the gauge. + pub async fn decrement_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.decrement_gauge(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to decrement the gauge: {}", err), + } + + result + } +} diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index d4462e3f6..0be8bd4c6 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -56,6 +56,7 @@ impl TestEnv { let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( self.torrent_repository_container.event_bus.receiver(), + &self.tracker_core_container.stats_repository, &self.tracker_core_container.db_torrent_repository, ); diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 89d700132..17a73a1d2 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -7,12 +7,12 @@ schema_version = "2.0.0" threshold = "info" [core] -inactive_peer_cleanup_interval = 60 +inactive_peer_cleanup_interval = 120 listed = false private = false [core.tracker_policy] -max_peer_timeout = 30 
+max_peer_timeout = 60 persistent_torrent_completed_stat = true remove_peerless_torrents = true diff --git a/src/bootstrap/jobs/tracker_core.rs b/src/bootstrap/jobs/tracker_core.rs index bb879db6b..37c53b9e4 100644 --- a/src/bootstrap/jobs/tracker_core.rs +++ b/src/bootstrap/jobs/tracker_core.rs @@ -6,11 +6,10 @@ use torrust_tracker_configuration::Configuration; use crate::container::AppContainer; pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { - // todo: enable this when labeled metrics are implemented. - //if config.core.tracker_usage_statistics || config.core.tracker_policy.persistent_torrent_completed_stat { - if config.core.tracker_policy.persistent_torrent_completed_stat { + if config.core.tracker_usage_statistics || config.core.tracker_policy.persistent_torrent_completed_stat { let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( app_container.torrent_repository_container.event_bus.receiver(), + &app_container.tracker_core_container.stats_repository, &app_container.tracker_core_container.db_torrent_repository, ); From 6f11534d49742a8b6654fe9450b683c2bd49e9a9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 10:37:27 +0100 Subject: [PATCH 082/247] feat: [#1539] add method to Database trait to persist global downloads counter It does not use the new methods in production yet. 
--- ...3000_torrust_tracker_create_all_tables.sql | 1 + ...er_new_torrent_aggregate_metrics_table.sql | 6 ++ ...3000_torrust_tracker_create_all_tables.sql | 1 + ...er_new_torrent_aggregate_metrics_table.sql | 6 ++ .../tracker-core/src/databases/driver/mod.rs | 44 ++++++++++++ .../src/databases/driver/mysql.rs | 57 +++++++++++++++- .../src/databases/driver/sqlite.rs | 68 ++++++++++++++++++- packages/tracker-core/src/databases/mod.rs | 34 +++++++++- 8 files changed, 214 insertions(+), 3 deletions(-) create mode 100644 packages/tracker-core/migrations/mysql/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql create mode 100644 packages/tracker-core/migrations/sqlite/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql diff --git a/packages/tracker-core/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql b/packages/tracker-core/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql index 407ae4dd1..ab160bd75 100644 --- a/packages/tracker-core/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql +++ b/packages/tracker-core/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql @@ -4,6 +4,7 @@ CREATE TABLE info_hash VARCHAR(40) NOT NULL UNIQUE ); +# todo: rename to `torrent_metrics` CREATE TABLE IF NOT EXISTS torrents ( id integer PRIMARY KEY AUTO_INCREMENT, diff --git a/packages/tracker-core/migrations/mysql/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql b/packages/tracker-core/migrations/mysql/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql new file mode 100644 index 000000000..36f940cc3 --- /dev/null +++ b/packages/tracker-core/migrations/mysql/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql @@ -0,0 +1,6 @@ +CREATE TABLE + IF NOT EXISTS torrent_aggregate_metrics ( + id integer PRIMARY KEY AUTO_INCREMENT, + metric_name VARCHAR(50) NOT NULL UNIQUE, + value INTEGER DEFAULT 0 NOT NULL + ); \ No newline 
at end of file diff --git a/packages/tracker-core/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql b/packages/tracker-core/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql index bd451bf8b..c5bcad926 100644 --- a/packages/tracker-core/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql +++ b/packages/tracker-core/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql @@ -4,6 +4,7 @@ CREATE TABLE info_hash TEXT NOT NULL UNIQUE ); +# todo: rename to `torrent_metrics` CREATE TABLE IF NOT EXISTS torrents ( id INTEGER PRIMARY KEY AUTOINCREMENT, diff --git a/packages/tracker-core/migrations/sqlite/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql b/packages/tracker-core/migrations/sqlite/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql new file mode 100644 index 000000000..34166903c --- /dev/null +++ b/packages/tracker-core/migrations/sqlite/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql @@ -0,0 +1,6 @@ +CREATE TABLE + IF NOT EXISTS torrent_aggregate_metrics ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + metric_name TEXT NOT NULL UNIQUE, + value INTEGER DEFAULT 0 NOT NULL + ); \ No newline at end of file diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index 2cedab2d7..e8f0ecbfb 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -6,6 +6,9 @@ use sqlite::Sqlite; use super::error::Error; use super::Database; +/// Metric name in DB for the total number of downloads across all torrents. +const TORRENTS_DOWNLOADS_TOTAL: &str = "torrents_downloads_total"; + /// The database management system used by the tracker. 
/// /// Refer to: @@ -97,9 +100,14 @@ pub(crate) mod tests { // Persistent torrents (stats) + // Torrent metrics handling_torrent_persistence::it_should_save_and_load_persistent_torrents(driver); handling_torrent_persistence::it_should_load_all_persistent_torrents(driver); handling_torrent_persistence::it_should_increase_the_number_of_downloads_for_a_given_torrent(driver); + // Aggregate metrics for all torrents + handling_torrent_persistence::it_should_save_and_load_the_global_number_of_downloads(driver); + handling_torrent_persistence::it_should_load_the_global_number_of_downloads(driver); + handling_torrent_persistence::it_should_increase_the_global_number_of_downloads(driver); // Authentication keys (for private trackers) @@ -154,6 +162,8 @@ pub(crate) mod tests { use crate::databases::Database; use crate::test_helpers::tests::sample_info_hash; + // Metrics per torrent + pub fn it_should_save_and_load_persistent_torrents(driver: &Arc>) { let infohash = sample_info_hash(); @@ -192,6 +202,40 @@ pub(crate) mod tests { assert_eq!(number_of_downloads, 2); } + + // Aggregate metrics for all torrents + + pub fn it_should_save_and_load_the_global_number_of_downloads(driver: &Arc>) { + let number_of_downloads = 1; + + driver.save_global_number_of_downloads(number_of_downloads).unwrap(); + + let number_of_downloads = driver.load_global_number_of_downloads().unwrap().unwrap(); + + assert_eq!(number_of_downloads, 1); + } + + pub fn it_should_load_the_global_number_of_downloads(driver: &Arc>) { + let number_of_downloads = 1; + + driver.save_global_number_of_downloads(number_of_downloads).unwrap(); + + let number_of_downloads = driver.load_global_number_of_downloads().unwrap().unwrap(); + + assert_eq!(number_of_downloads, 1); + } + + pub fn it_should_increase_the_global_number_of_downloads(driver: &Arc>) { + let number_of_downloads = 1; + + driver.save_global_number_of_downloads(number_of_downloads).unwrap(); + + driver.increase_global_number_of_downloads().unwrap(); + + let 
number_of_downloads = driver.load_global_number_of_downloads().unwrap().unwrap(); + + assert_eq!(number_of_downloads, 2); + } } mod handling_authentication_keys { diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index d07f061c2..bfbc47ebd 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -15,7 +15,7 @@ use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::{PersistentTorrent, PersistentTorrents}; -use super::{Database, Driver, Error}; +use super::{Database, Driver, Error, TORRENTS_DOWNLOADS_TOTAL}; use crate::authentication::key::AUTH_KEY_LENGTH; use crate::authentication::{self, Key}; @@ -46,6 +46,27 @@ impl Mysql { Ok(Self { pool }) } + + fn load_torrent_aggregate_metric(&self, metric_name: &str) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let query = conn.exec_first::( + "SELECT value FROM torrent_aggregate_metrics WHERE metric_name = :metric_name", + params! { "metric_name" => metric_name }, + ); + + let persistent_torrent = query?; + + Ok(persistent_torrent) + } + + fn save_torrent_aggregate_metric(&self, metric_name: &str, completed: PersistentTorrent) -> Result<(), Error> { + const COMMAND : &str = "INSERT INTO torrent_aggregate_metrics (metric_name, value) VALUES (:metric_name, :completed) ON DUPLICATE KEY UPDATE value = VALUES(value)"; + + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + Ok(conn.exec_drop(COMMAND, params! { metric_name, completed })?) 
+ } } impl Database for Mysql { @@ -66,6 +87,14 @@ impl Database for Mysql { );" .to_string(); + let create_torrent_aggregate_metrics_table = " + CREATE TABLE IF NOT EXISTS torrent_aggregate_metrics ( + id integer PRIMARY KEY AUTO_INCREMENT, + metric_name VARCHAR(50) NOT NULL UNIQUE, + value INTEGER DEFAULT 0 NOT NULL + );" + .to_string(); + let create_keys_table = format!( " CREATE TABLE IF NOT EXISTS `keys` ( @@ -82,6 +111,8 @@ impl Database for Mysql { conn.query_drop(&create_torrents_table) .expect("Could not create torrents table."); + conn.query_drop(&create_torrent_aggregate_metrics_table) + .expect("Could not create create_torrent_aggregate_metrics_table table."); conn.query_drop(&create_keys_table).expect("Could not create keys table."); conn.query_drop(&create_whitelist_table) .expect("Could not create whitelist table."); @@ -168,6 +199,30 @@ impl Database for Mysql { Ok(()) } + /// Refer to [`databases::Database::load_global_number_of_downloads`](crate::core::databases::Database::load_global_number_of_downloads). + fn load_global_number_of_downloads(&self) -> Result, Error> { + self.load_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL) + } + + /// Refer to [`databases::Database::save_global_number_of_downloads`](crate::core::databases::Database::save_global_number_of_downloads). + fn save_global_number_of_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { + self.save_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL, downloaded) + } + + /// Refer to [`databases::Database::increase_global_number_of_downloads`](crate::core::databases::Database::increase_global_number_of_downloads). + fn increase_global_number_of_downloads(&self) -> Result<(), Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let metric_name = TORRENTS_DOWNLOADS_TOTAL; + + conn.exec_drop( + "UPDATE torrent_aggregate_metrics SET value = value + 1 WHERE metric_name = :metric_name", + params! 
{ metric_name }, + )?; + + Ok(()) + } + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index d36f24f8b..91e969233 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -15,7 +15,7 @@ use r2d2_sqlite::rusqlite::types::Null; use r2d2_sqlite::SqliteConnectionManager; use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use super::{Database, Driver, Error}; +use super::{Database, Driver, Error, TORRENTS_DOWNLOADS_TOTAL}; use crate::authentication::{self, Key}; const DRIVER: Driver = Driver::Sqlite3; @@ -49,6 +49,39 @@ impl Sqlite { Ok(Self { pool }) } + + fn load_torrent_aggregate_metric(&self, metric_name: &str) -> Result, Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let mut stmt = conn.prepare("SELECT value FROM torrent_aggregate_metrics WHERE metric_name = ?")?; + + let mut rows = stmt.query([metric_name])?; + + let persistent_torrent = rows.next()?; + + Ok(persistent_torrent.map(|f| { + let value: i64 = f.get(0).unwrap(); + u32::try_from(value).unwrap() + })) + } + + fn save_torrent_aggregate_metric(&self, metric_name: &str, completed: PersistentTorrent) -> Result<(), Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let insert = conn.execute( + "INSERT INTO torrent_aggregate_metrics (metric_name, value) VALUES (?1, ?2) ON CONFLICT(metric_name) DO UPDATE SET value = ?2", + [metric_name.to_string(), completed.to_string()], + )?; + + if insert == 0 { + Err(Error::InsertFailed { + location: Location::caller(), + driver: DRIVER, + }) + } else { + Ok(()) + } + } } impl Database for Sqlite { @@ -69,6 +102,14 @@ impl Database for Sqlite { );" 
.to_string(); + let create_torrent_aggregate_metrics_table = " + CREATE TABLE IF NOT EXISTS torrent_aggregate_metrics ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + metric_name TEXT NOT NULL UNIQUE, + value INTEGER DEFAULT 0 NOT NULL + );" + .to_string(); + let create_keys_table = " CREATE TABLE IF NOT EXISTS keys ( id INTEGER PRIMARY KEY AUTOINCREMENT, @@ -82,6 +123,7 @@ impl Database for Sqlite { conn.execute(&create_whitelist_table, [])?; conn.execute(&create_keys_table, [])?; conn.execute(&create_torrents_table, [])?; + conn.execute(&create_torrent_aggregate_metrics_table, [])?; Ok(()) } @@ -172,6 +214,30 @@ impl Database for Sqlite { Ok(()) } + /// Refer to [`databases::Database::load_global_number_of_downloads`](crate::core::databases::Database::load_global_number_of_downloads). + fn load_global_number_of_downloads(&self) -> Result, Error> { + self.load_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL) + } + + /// Refer to [`databases::Database::save_global_number_of_downloads`](crate::core::databases::Database::save_global_number_of_downloads). + fn save_global_number_of_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { + self.save_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL, downloaded) + } + + /// Refer to [`databases::Database::increase_global_number_of_downloads`](crate::core::databases::Database::increase_global_number_of_downloads). + fn increase_global_number_of_downloads(&self) -> Result<(), Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let metric_name = TORRENTS_DOWNLOADS_TOTAL; + + let _ = conn.execute( + "UPDATE torrent_aggregate_metrics SET value = value + 1 WHERE metric_name = ?", + [metric_name], + )?; + + Ok(()) + } + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). 
fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index 2703ab8bf..a9d6b2a22 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -131,16 +131,48 @@ pub trait Database: Sync + Send { /// It does not create a new entry if the torrent is not found and it does /// not return an error. /// + /// # Context: Torrent Metrics + /// + /// # Arguments + /// + /// * `info_hash` - A reference to the torrent's info hash. + /// + /// # Errors + /// + /// Returns an [`Error`] if the query failed. + fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error>; + + /// Loads the total number of downloads for all torrents from the database. + /// + /// # Context: Torrent Metrics + /// + /// # Errors + /// + /// Returns an [`Error`] if the total downloads cannot be loaded. + fn load_global_number_of_downloads(&self) -> Result, Error>; + + /// Saves the total number of downloads for all torrents into the database. + /// + /// # Context: Torrent Metrics + /// /// # Arguments /// /// * `info_hash` - A reference to the torrent's info hash. + /// * `downloaded` - The number of times the torrent has been downloaded. + /// + /// # Errors + /// + /// Returns an [`Error`] if the total downloads cannot be saved. + fn save_global_number_of_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error>; + + /// Increases the total number of downloads for all torrents. /// /// # Context: Torrent Metrics /// /// # Errors /// /// Returns an [`Error`] if the query failed. 
- fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error>; + fn increase_global_number_of_downloads(&self) -> Result<(), Error>; // Whitelist From 9301e587ab8f4d565c19418ebb46a4340a1fcc9f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 10:57:20 +0100 Subject: [PATCH 083/247] feat: [#1539] save global downloads counter in DB The total number of downloads (for all torrents) is saved in the DB, but not loaded yet. todo: load the initial value when the tracker starts. --- .../src/statistics/event/handler.rs | 34 +++++++++++-------- .../src/torrent/repository/persisted.rs | 16 +++++++++ 2 files changed, 36 insertions(+), 14 deletions(-) diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index ac6d0639e..e394641b8 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -41,17 +41,7 @@ pub async fn handle_event( Event::PeerDownloadCompleted { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); - // Increment the number of downloads for the torrent - match db_torrent_repository.increase_number_of_downloads(&info_hash) { - Ok(()) => { - tracing::debug!(info_hash = ?info_hash, "Number of downloads increased"); - } - Err(err) => { - tracing::error!(info_hash = ?info_hash, error = ?err, "Failed to increase number of downloads"); - } - } - - // Increment the number of downloads for all the torrents + // Increment the number of downloads for all the torrents in memory let _unused = stats_repository .increment_counter( &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), @@ -60,9 +50,25 @@ pub async fn handle_event( ) .await; - // todo: - // - Persist the metric into the database. - // - Load the metric from the database. 
+ // Increment the number of downloads for the torrent in the database + match db_torrent_repository.increase_number_of_downloads(&info_hash) { + Ok(()) => { + tracing::debug!(info_hash = ?info_hash, "Number of torrent downloads increased"); + } + Err(err) => { + tracing::error!(info_hash = ?info_hash, error = ?err, "Failed to increase number of downloads for the torrent"); + } + } + + // Increment the global number of downloads (for all torrents) in the database + match db_torrent_repository.increase_global_number_of_downloads() { + Ok(()) => { + tracing::debug!("Global number of downloads increased"); + } + Err(err) => { + tracing::error!(error = ?err, "Failed to increase global number of downloads"); + } + } } } } diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/persisted.rs index dec571baf..62e3244ba 100644 --- a/packages/tracker-core/src/torrent/repository/persisted.rs +++ b/packages/tracker-core/src/torrent/repository/persisted.rs @@ -67,6 +67,22 @@ impl DatabasePersistentTorrentRepository { } } + /// Increases the global number of downloads for all torrent. + /// + /// If the metric is not found, it creates it. + /// + /// # Errors + /// + /// Returns an [`Error`] if the database operation fails. + pub(crate) fn increase_global_number_of_downloads(&self) -> Result<(), Error> { + let torrent = self.database.load_global_number_of_downloads()?; + + match torrent { + Some(_number_of_downloads) => self.database.increase_global_number_of_downloads(), + None => self.database.save_global_number_of_downloads(1), + } + } + /// Loads all persistent torrent metrics from the database. 
/// /// This function retrieves the torrent metrics (e.g., download counts) from the persistent store From c07f3667572b9c70c72f281aabe0a2f13cebcdc3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 12:05:20 +0100 Subject: [PATCH 084/247] feat: [#1539] load global downloads counter from DB When the tracker starts. --- packages/metrics/src/counter.rs | 11 ++++ packages/metrics/src/metric/mod.rs | 4 ++ packages/metrics/src/metric_collection.rs | 43 +++++++++++++- packages/metrics/src/sample.rs | 5 ++ packages/metrics/src/sample_collection.rs | 9 +++ .../tracker-core/src/statistics/metrics.rs | 13 +++++ packages/tracker-core/src/statistics/mod.rs | 1 + .../src/statistics/persisted_metrics.rs | 57 +++++++++++++++++++ .../tracker-core/src/statistics/repository.rs | 25 ++++++++ .../src/torrent/repository/persisted.rs | 45 +++++++++------ packages/tracker-core/tests/integration.rs | 2 +- src/app.rs | 17 ++++++ 12 files changed, 214 insertions(+), 18 deletions(-) create mode 100644 packages/tracker-core/src/statistics/persisted_metrics.rs diff --git a/packages/metrics/src/counter.rs b/packages/metrics/src/counter.rs index 3a816c75b..ac6d21836 100644 --- a/packages/metrics/src/counter.rs +++ b/packages/metrics/src/counter.rs @@ -20,6 +20,10 @@ impl Counter { pub fn increment(&mut self, value: u64) { self.0 += value; } + + pub fn absolute(&mut self, value: u64) { + self.0 = value; + } } impl From for Counter { @@ -73,6 +77,13 @@ mod tests { assert_eq!(counter.value(), 3); } + #[test] + fn it_could_set_to_an_absolute_value() { + let mut counter = Counter::new(0); + counter.absolute(1); + assert_eq!(counter.value(), 1); + } + #[test] fn it_serializes_to_prometheus() { let counter = Counter::new(42); diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 05779f09f..2118637b8 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -55,6 +55,10 @@ impl Metric { pub fn increment(&mut self, 
label_set: &LabelSet, time: DurationSinceUnixEpoch) { self.sample_collection.increment(label_set, time); } + + pub fn absolute(&mut self, label_set: &LabelSet, value: u64, time: DurationSinceUnixEpoch) { + self.sample_collection.absolute(label_set, value, time); + } } impl Metric { diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 83b08f178..824397000 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -72,6 +72,8 @@ impl MetricCollection { self.counters.get_value(name, label_set) } + /// Increases the counter for the given metric name and labels. + /// /// # Errors /// /// Return an error if a metrics of a different type with the same name @@ -93,6 +95,30 @@ impl MetricCollection { Ok(()) } + /// Sets the counter for the given metric name and labels. + /// + /// # Errors + /// + /// Return an error if a metrics of a different type with the same name + /// already exists. + pub fn set_counter( + &mut self, + name: &MetricName, + label_set: &LabelSet, + value: u64, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + if self.gauges.metrics.contains_key(name) { + return Err(Error::MetricNameCollisionAdding { + metric_name: name.clone(), + }); + } + + self.counters.absolute(name, label_set, value, time); + + Ok(()) + } + pub fn ensure_counter_exists(&mut self, name: &MetricName) { self.counters.ensure_metric_exists(name); } @@ -361,7 +387,7 @@ impl MetricKindCollection { /// /// # Panics /// - /// Panics if the metric does not exist and it could not be created. + /// Panics if the metric does not exist. pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { self.ensure_metric_exists(name); @@ -370,6 +396,21 @@ impl MetricKindCollection { metric.increment(label_set, time); } + /// Sets the counter to an absolute value for the given metric name and labels. 
+ /// + /// If the metric name does not exist, it will be created. + /// + /// # Panics + /// + /// Panics if the metric does not exist. + pub fn absolute(&mut self, name: &MetricName, label_set: &LabelSet, value: u64, time: DurationSinceUnixEpoch) { + self.ensure_metric_exists(name); + + let metric = self.metrics.get_mut(name).expect("Counter metric should exist"); + + metric.absolute(label_set, value, time); + } + #[must_use] pub fn get_value(&self, name: &MetricName, label_set: &LabelSet) -> Option { self.metrics diff --git a/packages/metrics/src/sample.rs b/packages/metrics/src/sample.rs index 4621c9906..ad4dff00e 100644 --- a/packages/metrics/src/sample.rs +++ b/packages/metrics/src/sample.rs @@ -122,6 +122,11 @@ impl Measurement { self.value.increment(1); self.set_recorded_at(time); } + + pub fn absolute(&mut self, value: u64, time: DurationSinceUnixEpoch) { + self.value.absolute(value); + self.set_recorded_at(time); + } } impl Measurement { diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs index ea6b4d4af..e815f26ec 100644 --- a/packages/metrics/src/sample_collection.rs +++ b/packages/metrics/src/sample_collection.rs @@ -79,6 +79,15 @@ impl SampleCollection { sample.increment(time); } + + pub fn absolute(&mut self, label_set: &LabelSet, value: u64, time: DurationSinceUnixEpoch) { + let sample = self + .samples + .entry(label_set.clone()) + .or_insert_with(|| Measurement::new(Counter::default(), time)); + + sample.absolute(value, time); + } } impl SampleCollection { diff --git a/packages/tracker-core/src/statistics/metrics.rs b/packages/tracker-core/src/statistics/metrics.rs index f8ab3f9d9..02cc51499 100644 --- a/packages/tracker-core/src/statistics/metrics.rs +++ b/packages/tracker-core/src/statistics/metrics.rs @@ -24,6 +24,19 @@ impl Metrics { self.metric_collection.increase_counter(metric_name, labels, now) } + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. 
+ pub fn set_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: u64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_counter(metric_name, labels, value, now) + } + /// # Errors /// /// Returns an error if the metric does not exist and it cannot be created. diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs index 1cd9aac6b..89d6b79d5 100644 --- a/packages/tracker-core/src/statistics/mod.rs +++ b/packages/tracker-core/src/statistics/mod.rs @@ -1,5 +1,6 @@ pub mod event; pub mod metrics; +pub mod persisted_metrics; pub mod repository; use metrics::Metrics; diff --git a/packages/tracker-core/src/statistics/persisted_metrics.rs b/packages/tracker-core/src/statistics/persisted_metrics.rs new file mode 100644 index 000000000..4d53236a5 --- /dev/null +++ b/packages/tracker-core/src/statistics/persisted_metrics.rs @@ -0,0 +1,57 @@ +use std::sync::Arc; + +use thiserror::Error; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::{metric_collection, metric_name}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::repository::Repository; +use super::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; +use crate::databases; +use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; + +/// Loads persisted metrics from the database and sets them in the stats repository. +/// +/// # Errors +/// +/// This function will return an error if the database query fails or if the +/// metric collection fails to set the initial metric values. +pub async fn load_persisted_metrics( + stats_repository: &Arc, + db_torrent_repository: &Arc, + now: DurationSinceUnixEpoch, +) -> Result<(), Error> { + if let Some(downloads) = db_torrent_repository.load_global_number_of_downloads()? 
{ + stats_repository + .set_counter( + &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + &LabelSet::default(), + u64::from(downloads), + now, + ) + .await?; + } + + Ok(()) +} + +#[derive(Error, Debug, Clone)] +pub enum Error { + #[error("Database error: {err}")] + DatabaseError { err: databases::error::Error }, + + #[error("Metrics error: {err}")] + MetricsError { err: metric_collection::Error }, +} + +impl From for Error { + fn from(err: databases::error::Error) -> Self { + Self::DatabaseError { err } + } +} + +impl From for Error { + fn from(err: metric_collection::Error) -> Self { + Self::MetricsError { err } + } +} diff --git a/packages/tracker-core/src/statistics/repository.rs b/packages/tracker-core/src/statistics/repository.rs index fe1292d00..dd0ebebe7 100644 --- a/packages/tracker-core/src/statistics/repository.rs +++ b/packages/tracker-core/src/statistics/repository.rs @@ -57,6 +57,31 @@ impl Repository { result } + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increment the counter. 
+ pub async fn set_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + value: u64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.set_counter(metric_name, labels, value, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to set the counter: {}", err), + } + + result + } + /// # Errors /// /// This function will return an error if the metric collection fails to diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/persisted.rs index 62e3244ba..1818065fd 100644 --- a/packages/tracker-core/src/torrent/repository/persisted.rs +++ b/packages/tracker-core/src/torrent/repository/persisted.rs @@ -47,6 +47,8 @@ impl DatabasePersistentTorrentRepository { } } + // Single Torrent Metrics + /// Increases the number of downloads for a given torrent. /// /// If the torrent is not found, it creates a new entry. @@ -67,22 +69,6 @@ impl DatabasePersistentTorrentRepository { } } - /// Increases the global number of downloads for all torrent. - /// - /// If the metric is not found, it creates it. - /// - /// # Errors - /// - /// Returns an [`Error`] if the database operation fails. - pub(crate) fn increase_global_number_of_downloads(&self) -> Result<(), Error> { - let torrent = self.database.load_global_number_of_downloads()?; - - match torrent { - Some(_number_of_downloads) => self.database.increase_global_number_of_downloads(), - None => self.database.save_global_number_of_downloads(1), - } - } - /// Loads all persistent torrent metrics from the database. 
/// /// This function retrieves the torrent metrics (e.g., download counts) from the persistent store @@ -123,6 +109,33 @@ impl DatabasePersistentTorrentRepository { pub(crate) fn save(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> { self.database.save_persistent_torrent(info_hash, downloaded) } + + // Aggregate Metrics + + /// Increases the global number of downloads for all torrent. + /// + /// If the metric is not found, it creates it. + /// + /// # Errors + /// + /// Returns an [`Error`] if the database operation fails. + pub(crate) fn increase_global_number_of_downloads(&self) -> Result<(), Error> { + let torrent = self.database.load_global_number_of_downloads()?; + + match torrent { + Some(_number_of_downloads) => self.database.increase_global_number_of_downloads(), + None => self.database.save_global_number_of_downloads(1), + } + } + + /// Loads the global number of downloads for all torrents from the database. + /// + /// # Errors + /// + /// Returns an [`Error`] if the underlying database query fails. + pub(crate) fn load_global_number_of_downloads(&self) -> Result, Error> { + self.database.load_global_number_of_downloads() + } } #[cfg(test)] diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs index d24acf67b..986bdaaf3 100644 --- a/packages/tracker-core/tests/integration.rs +++ b/packages/tracker-core/tests/integration.rs @@ -58,7 +58,7 @@ async fn it_should_handle_the_scrape_request() { } #[tokio::test] -async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { +async fn it_should_persist_the_number_of_completed_peers_for_each_torrent_into_the_database() { let mut core_config = ephemeral_configuration(); core_config.tracker_policy.persistent_torrent_completed_stat = true; diff --git a/src/app.rs b/src/app.rs index 5037ad761..571e034f5 100644 --- a/src/app.rs +++ b/src/app.rs @@ -23,6 +23,7 @@ //! 
- Tracker REST API: the tracker API can be enabled/disabled. use std::sync::Arc; +use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, HttpTracker, UdpTracker}; use tracing::instrument; @@ -32,6 +33,7 @@ use crate::bootstrap::jobs::{ }; use crate::bootstrap::{self}; use crate::container::AppContainer; +use crate::CurrentClock; pub async fn run() -> (Arc, JobManager) { let (config, app_container) = bootstrap::app::setup(); @@ -63,6 +65,8 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> async fn load_data_from_database(config: &Configuration, app_container: &Arc) { load_peer_keys(config, app_container).await; load_whitelisted_torrents(config, app_container).await; + load_torrent_metrics(config, app_container).await; + // todo: disabled because of performance issues. // The tracker demo has a lot of torrents and loading them all at once is not // efficient. We also load them on demand but the total number of downloads @@ -134,6 +138,19 @@ fn load_torrents_from_database(config: &Configuration, app_container: &Arc) { + if config.core.tracker_policy.persistent_torrent_completed_stat { + bittorrent_tracker_core::statistics::persisted_metrics::load_persisted_metrics( + &app_container.tracker_core_container.stats_repository, + &app_container.tracker_core_container.db_torrent_repository, + CurrentClock::now(), + ) + .await + .expect("Could not load persisted metrics from database."); + } +} + fn start_torrent_repository_event_listener( config: &Configuration, app_container: &Arc, From 2c9311bf2240cbc56ccbb3ec5dfee954d666a13e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 12:54:35 +0100 Subject: [PATCH 085/247] test: [#1539] add integration test for persisted downloads counter --- .../tracker-core/tests/common/test_env.rs | 31 +++++++++++++++++++ packages/tracker-core/tests/integration.rs | 25 +++++++++++++++ 2 files changed, 56 insertions(+) diff --git 
a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 0be8bd4c6..4e14e9bd8 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -5,11 +5,15 @@ use aquatic_udp_protocol::AnnounceEvent; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::PeersWanted; use bittorrent_tracker_core::container::TrackerCoreContainer; +use bittorrent_tracker_core::statistics::persisted_metrics::load_persisted_metrics; use tokio::task::yield_now; use torrust_tracker_configuration::Core; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; pub struct TestEnv { @@ -45,6 +49,22 @@ impl TestEnv { } pub async fn start(&self) { + let now = DurationSinceUnixEpoch::from_secs(0); + self.load_persisted_metrics(now).await; + self.run_jobs().await; + } + + async fn load_persisted_metrics(&self, now: DurationSinceUnixEpoch) { + load_persisted_metrics( + &self.tracker_core_container.stats_repository, + &self.tracker_core_container.db_torrent_repository, + now, + ) + .await + .unwrap(); + } + + async fn run_jobs(&self) { let mut jobs = vec![]; let job = torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( @@ -135,4 +155,15 @@ impl TestEnv { pub async fn remove_swarm(&self, info_hash: &InfoHash) { self.torrent_repository_container.swarms.remove(info_hash).await.unwrap(); } + + pub async fn get_counter_value(&self, metric_name: &str) -> u64 { + self.tracker_core_container + .stats_repository + .get_metrics() + .await + .metric_collection + 
.get_counter_value(&MetricName::new(metric_name), &LabelSet::default()) + .unwrap() + .value() + } } diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs index 986bdaaf3..b170aaebd 100644 --- a/packages/tracker-core/tests/integration.rs +++ b/packages/tracker-core/tests/integration.rs @@ -86,3 +86,28 @@ async fn it_should_persist_the_number_of_completed_peers_for_each_torrent_into_t assert!(test_env.get_swarm_metadata(&info_hash).await.unwrap().downloads() == 1); } + +#[tokio::test] +async fn it_should_persist_the_global_number_of_completed_peers_into_the_database() { + let mut core_config = ephemeral_configuration(); + + core_config.tracker_policy.persistent_torrent_completed_stat = true; + + let mut test_env = TestEnv::started(core_config.clone()).await; + + test_env + .increase_number_of_downloads(sample_peer(), &remote_client_ip(), &sample_info_hash()) + .await; + + // We run a new instance of the test environment to simulate a restart. + // The new instance uses the same underlying database. 
+ + let new_test_env = TestEnv::started(core_config).await; + + assert_eq!( + new_test_env + .get_counter_value("tracker_core_persistent_torrents_downloads_total") + .await, + 1 + ); +} From 4febda494e036f772e2c473a784acf0d254d026c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 13:17:57 +0100 Subject: [PATCH 086/247] fix: [#1539] persistent metrics should be enabled by config --- .../src/statistics/event/handler.rs | 33 ++++++++++--------- .../src/statistics/event/listener.rs | 17 ++++++++-- .../tracker-core/tests/common/test_env.rs | 4 +++ src/bootstrap/jobs/tracker_core.rs | 5 +++ 4 files changed, 42 insertions(+), 17 deletions(-) diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index e394641b8..4002053e2 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -13,6 +13,7 @@ pub async fn handle_event( event: Event, stats_repository: &Arc, db_torrent_repository: &Arc, + persistent_torrent_completed_stat: bool, now: DurationSinceUnixEpoch, ) { match event { @@ -50,23 +51,25 @@ pub async fn handle_event( ) .await; - // Increment the number of downloads for the torrent in the database - match db_torrent_repository.increase_number_of_downloads(&info_hash) { - Ok(()) => { - tracing::debug!(info_hash = ?info_hash, "Number of torrent downloads increased"); + if persistent_torrent_completed_stat { + // Increment the number of downloads for the torrent in the database + match db_torrent_repository.increase_number_of_downloads(&info_hash) { + Ok(()) => { + tracing::debug!(info_hash = ?info_hash, "Number of torrent downloads increased"); + } + Err(err) => { + tracing::error!(info_hash = ?info_hash, error = ?err, "Failed to increase number of downloads for the torrent"); + } } - Err(err) => { - tracing::error!(info_hash = ?info_hash, error = ?err, "Failed to increase number of downloads for the torrent"); - } - } 
- // Increment the global number of downloads (for all torrents) in the database - match db_torrent_repository.increase_global_number_of_downloads() { - Ok(()) => { - tracing::debug!("Global number of downloads increased"); - } - Err(err) => { - tracing::error!(error = ?err, "Failed to increase global number of downloads"); + // Increment the global number of downloads (for all torrents) in the database + match db_torrent_repository.increase_global_number_of_downloads() { + Ok(()) => { + tracing::debug!("Global number of downloads increased"); + } + Err(err) => { + tracing::error!(error = ?err, "Failed to increase global number of downloads"); + } } } } diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index f85b2b7a0..cf6d35d6e 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -15,6 +15,7 @@ pub fn run_event_listener( receiver: Receiver, repository: &Arc, db_torrent_repository: &Arc, + persistent_torrent_completed_stat: bool, ) -> JoinHandle<()> { let stats_repository = repository.clone(); let db_torrent_repository: Arc = db_torrent_repository.clone(); @@ -22,7 +23,13 @@ pub fn run_event_listener( tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting torrent repository event listener"); tokio::spawn(async move { - dispatch_events(receiver, stats_repository, db_torrent_repository).await; + dispatch_events( + receiver, + stats_repository, + db_torrent_repository, + persistent_torrent_completed_stat, + ) + .await; tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository listener finished"); }) @@ -32,6 +39,7 @@ async fn dispatch_events( mut receiver: Receiver, stats_repository: Arc, db_torrent_repository: Arc, + persistent_torrent_completed_stat: bool, ) { let shutdown_signal = tokio::signal::ctrl_c(); @@ -48,7 +56,12 @@ async fn dispatch_events( result = receiver.recv() => { match result { - 
Ok(event) => handle_event(event, &stats_repository, &db_torrent_repository, CurrentClock::now()).await, + Ok(event) => handle_event( + event, + &stats_repository, + &db_torrent_repository, + persistent_torrent_completed_stat, + CurrentClock::now()).await, Err(e) => { match e { RecvError::Closed => { diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 4e14e9bd8..11a4d400a 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -78,6 +78,10 @@ impl TestEnv { self.torrent_repository_container.event_bus.receiver(), &self.tracker_core_container.stats_repository, &self.tracker_core_container.db_torrent_repository, + self.tracker_core_container + .core_config + .tracker_policy + .persistent_torrent_completed_stat, ); jobs.push(job); diff --git a/src/bootstrap/jobs/tracker_core.rs b/src/bootstrap/jobs/tracker_core.rs index 37c53b9e4..161e69aad 100644 --- a/src/bootstrap/jobs/tracker_core.rs +++ b/src/bootstrap/jobs/tracker_core.rs @@ -11,6 +11,11 @@ pub fn start_event_listener(config: &Configuration, app_container: &Arc Date: Tue, 27 May 2025 14:36:23 +0100 Subject: [PATCH 087/247] refactor: [#1541] rename DatabasePersistentTorrentRepository to DatabaseDownloadsMetricRepository --- .../src/v1/handlers/announce.rs | 4 ++-- packages/http-tracker-core/benches/helpers/util.rs | 4 ++-- packages/http-tracker-core/src/services/announce.rs | 4 ++-- packages/http-tracker-core/src/services/scrape.rs | 4 ++-- packages/tracker-core/src/announce_handler.rs | 6 +++--- packages/tracker-core/src/container.rs | 6 +++--- .../tracker-core/src/statistics/event/handler.rs | 4 ++-- .../tracker-core/src/statistics/event/listener.rs | 8 ++++---- .../tracker-core/src/statistics/persisted_metrics.rs | 4 ++-- packages/tracker-core/src/test_helpers.rs | 4 ++-- packages/tracker-core/src/torrent/manager.rs | 12 ++++++------ .../tracker-core/src/torrent/repository/persisted.rs | 12 
++++++------ packages/udp-tracker-server/src/handlers/announce.rs | 4 ++-- packages/udp-tracker-server/src/handlers/mod.rs | 4 ++-- 14 files changed, 40 insertions(+), 40 deletions(-) diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 7d7a0b386..c195b5a1f 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -120,7 +120,7 @@ mod tests { use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::Configuration; @@ -156,7 +156,7 @@ mod tests { let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index cfb3f745f..bf870b39c 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ 
b/packages/http-tracker-core/benches/helpers/util.rs @@ -15,7 +15,7 @@ use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemor use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; @@ -45,7 +45,7 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 0ad5ed143..36dd58193 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -213,7 +213,7 @@ mod tests { use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use 
bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::{Configuration, Core}; @@ -239,7 +239,7 @@ mod tests { let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index f22f2f632..e98c1b2c4 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -177,7 +177,7 @@ mod tests { use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use 
bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; @@ -200,7 +200,7 @@ mod tests { let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index ffd244f2a..5c79e32bf 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -99,7 +99,7 @@ use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; use super::torrent::repository::in_memory::InMemoryTorrentRepository; -use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use super::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use crate::error::AnnounceError; use crate::whitelist::authorization::WhitelistAuthorization; @@ -115,7 +115,7 @@ pub struct AnnounceHandler { in_memory_torrent_repository: Arc, /// Repository for persistent torrent data (database). 
- db_torrent_repository: Arc, + db_torrent_repository: Arc, } impl AnnounceHandler { @@ -125,7 +125,7 @@ impl AnnounceHandler { config: &Core, whitelist_authorization: &Arc, in_memory_torrent_repository: &Arc, - db_torrent_repository: &Arc, + db_torrent_repository: &Arc, ) -> Self { Self { whitelist_authorization: whitelist_authorization.clone(), diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index ed56fb106..8c6f360eb 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -13,7 +13,7 @@ use crate::databases::Database; use crate::scrape_handler::ScrapeHandler; use crate::torrent::manager::TorrentsManager; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use crate::whitelist::authorization::WhitelistAuthorization; use crate::whitelist::manager::WhitelistManager; use crate::whitelist::repository::in_memory::InMemoryWhitelist; @@ -31,7 +31,7 @@ pub struct TrackerCoreContainer { pub whitelist_authorization: Arc, pub whitelist_manager: Arc, pub in_memory_torrent_repository: Arc, - pub db_torrent_repository: Arc, + pub db_torrent_repository: Arc, pub torrents_manager: Arc, pub stats_repository: Arc, } @@ -51,7 +51,7 @@ impl TrackerCoreContainer { &in_memory_key_repository.clone(), )); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(torrent_repository_container.swarms.clone())); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( core_config, diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index 4002053e2..028e7bc46 100644 
--- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -7,12 +7,12 @@ use torrust_tracker_torrent_repository::event::Event; use crate::statistics::repository::Repository; use crate::statistics::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; -use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; pub async fn handle_event( event: Event, stats_repository: &Arc, - db_torrent_repository: &Arc, + db_torrent_repository: &Arc, persistent_torrent_completed_stat: bool, now: DurationSinceUnixEpoch, ) { diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index cf6d35d6e..63c75e2f6 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -7,18 +7,18 @@ use torrust_tracker_torrent_repository::event::receiver::Receiver; use super::handler::handle_event; use crate::statistics::repository::Repository; -use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; #[must_use] pub fn run_event_listener( receiver: Receiver, repository: &Arc, - db_torrent_repository: &Arc, + db_torrent_repository: &Arc, persistent_torrent_completed_stat: bool, ) -> JoinHandle<()> { let stats_repository = repository.clone(); - let db_torrent_repository: Arc = db_torrent_repository.clone(); + let db_torrent_repository: Arc = db_torrent_repository.clone(); tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting torrent repository event listener"); @@ -38,7 +38,7 @@ pub fn run_event_listener( async fn dispatch_events( mut receiver: Receiver, stats_repository: Arc, - db_torrent_repository: Arc, + db_torrent_repository: Arc, 
persistent_torrent_completed_stat: bool, ) { let shutdown_signal = tokio::signal::ctrl_c(); diff --git a/packages/tracker-core/src/statistics/persisted_metrics.rs b/packages/tracker-core/src/statistics/persisted_metrics.rs index 4d53236a5..73c52884e 100644 --- a/packages/tracker-core/src/statistics/persisted_metrics.rs +++ b/packages/tracker-core/src/statistics/persisted_metrics.rs @@ -8,7 +8,7 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::Repository; use super::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; use crate::databases; -use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; /// Loads persisted metrics from the database and sets them in the stats repository. /// @@ -18,7 +18,7 @@ use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; /// metric collection fails to set the initial metric values. pub async fn load_persisted_metrics( stats_repository: &Arc, - db_torrent_repository: &Arc, + db_torrent_repository: &Arc, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { if let Some(downloads) = db_torrent_repository.load_global_number_of_downloads()? 
{ diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index 04fe4133b..540381c75 100644 --- a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -20,7 +20,7 @@ pub(crate) mod tests { use crate::databases::setup::initialize_database; use crate::scrape_handler::ScrapeHandler; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use crate::whitelist::repository::in_memory::InMemoryWhitelist; use crate::whitelist::{self}; @@ -137,7 +137,7 @@ pub(crate) mod tests { &in_memory_whitelist.clone(), )); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index d9997c4ad..dfcdaf38c 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::Core; use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::in_memory::InMemoryTorrentRepository; -use super::repository::persisted::DatabasePersistentTorrentRepository; +use super::repository::persisted::DatabaseDownloadsMetricRepository; use crate::{databases, CurrentClock}; /// The `TorrentsManager` is responsible for managing torrent entries by @@ -31,7 +31,7 @@ pub struct TorrentsManager { /// The persistent torrents repository. 
#[allow(dead_code)] - db_torrent_repository: Arc, + db_torrent_repository: Arc, } impl TorrentsManager { @@ -52,7 +52,7 @@ impl TorrentsManager { pub fn new( config: &Core, in_memory_torrent_repository: &Arc, - db_torrent_repository: &Arc, + db_torrent_repository: &Arc, ) -> Self { Self { config: config.clone(), @@ -153,7 +153,7 @@ mod tests { use torrust_tracker_configuration::Core; use torrust_tracker_torrent_repository::Swarms; - use super::{DatabasePersistentTorrentRepository, TorrentsManager}; + use super::{DatabaseDownloadsMetricRepository, TorrentsManager}; use crate::databases::setup::initialize_database; use crate::test_helpers::tests::{ephemeral_configuration, sample_info_hash}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -161,7 +161,7 @@ mod tests { struct TorrentsManagerDeps { config: Arc, in_memory_torrent_repository: Arc, - database_persistent_torrent_repository: Arc, + database_persistent_torrent_repository: Arc, } fn initialize_torrents_manager() -> (Arc, Arc) { @@ -173,7 +173,7 @@ mod tests { let swarms = Arc::new(Swarms::default()); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms)); let database = initialize_database(&config); - let database_persistent_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let database_persistent_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( &config, diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/persisted.rs index 1818065fd..d6c6ce263 100644 --- a/packages/tracker-core/src/torrent/repository/persisted.rs +++ b/packages/tracker-core/src/torrent/repository/persisted.rs @@ -19,7 +19,7 @@ use crate::databases::Database; /// /// Not all in-memory torrent data is persisted; only the aggregate metrics are /// stored. 
-pub struct DatabasePersistentTorrentRepository { +pub struct DatabaseDownloadsMetricRepository { /// A shared reference to the database driver implementation. /// /// The driver must implement the [`Database`] trait. This allows for @@ -28,7 +28,7 @@ pub struct DatabasePersistentTorrentRepository { database: Arc>, } -impl DatabasePersistentTorrentRepository { +impl DatabaseDownloadsMetricRepository { /// Creates a new instance of `DatabasePersistentTorrentRepository`. /// /// # Arguments @@ -41,7 +41,7 @@ impl DatabasePersistentTorrentRepository { /// A new `DatabasePersistentTorrentRepository` instance with a cloned /// reference to the provided database. #[must_use] - pub fn new(database: &Arc>) -> DatabasePersistentTorrentRepository { + pub fn new(database: &Arc>) -> DatabaseDownloadsMetricRepository { Self { database: database.clone(), } @@ -143,14 +143,14 @@ mod tests { use torrust_tracker_primitives::PersistentTorrents; - use super::DatabasePersistentTorrentRepository; + use super::DatabaseDownloadsMetricRepository; use crate::databases::setup::initialize_database; use crate::test_helpers::tests::{ephemeral_configuration, sample_info_hash, sample_info_hash_one, sample_info_hash_two}; - fn initialize_db_persistent_torrent_repository() -> DatabasePersistentTorrentRepository { + fn initialize_db_persistent_torrent_repository() -> DatabaseDownloadsMetricRepository { let config = ephemeral_configuration(); let database = initialize_database(&config); - DatabasePersistentTorrentRepository::new(&database) + DatabaseDownloadsMetricRepository::new(&database) } #[test] diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index e2ca6821e..60788ab9c 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -836,7 +836,7 @@ mod tests { use bittorrent_tracker_core::announce_handler::AnnounceHandler; use 
bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; @@ -885,7 +885,7 @@ mod tests { let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 831073333..eb51e6d01 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -212,7 +212,7 @@ pub(crate) mod tests { use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use 
bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; @@ -275,7 +275,7 @@ pub(crate) mod tests { let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, From 99adbdee9dfe7bf9846bfebeacbf0036193385bc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 14:41:48 +0100 Subject: [PATCH 088/247] refactor: [#1541] rename symbol db_torrent_repository to db_downloads_metric_repository --- .../src/v1/handlers/announce.rs | 4 ++-- packages/http-tracker-core/benches/helpers/util.rs | 4 ++-- packages/http-tracker-core/src/services/announce.rs | 4 ++-- packages/http-tracker-core/src/services/scrape.rs | 4 ++-- packages/tracker-core/src/announce_handler.rs | 8 ++++---- packages/tracker-core/src/container.rs | 10 +++++----- packages/tracker-core/src/statistics/event/handler.rs | 6 +++--- packages/tracker-core/src/statistics/event/listener.rs | 10 +++++----- .../tracker-core/src/statistics/persisted_metrics.rs | 4 ++-- packages/tracker-core/src/test_helpers.rs | 4 ++-- packages/tracker-core/src/torrent/manager.rs | 10 +++++----- packages/tracker-core/tests/common/test_env.rs | 4 ++-- packages/udp-tracker-server/src/handlers/announce.rs | 4 ++-- packages/udp-tracker-server/src/handlers/mod.rs | 4 ++-- src/app.rs | 2 +- src/bootstrap/jobs/tracker_core.rs | 2 +- 16 files changed, 42 insertions(+), 42 deletions(-) diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 
c195b5a1f..108ebb33f 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -156,12 +156,12 @@ mod tests { let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); // HTTP core stats diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index bf870b39c..06c20543e 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -45,7 +45,7 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); @@ -55,7 +55,7 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( &config.core, &whitelist_authorization, 
&in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); // HTTP core stats diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 36dd58193..7831324f0 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -239,7 +239,7 @@ mod tests { let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); @@ -249,7 +249,7 @@ mod tests { &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); // HTTP core stats diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index e98c1b2c4..0261626a9 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -200,7 +200,7 @@ mod tests { let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let 
in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); @@ -208,7 +208,7 @@ mod tests { &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 5c79e32bf..847ddd1af 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -115,7 +115,7 @@ pub struct AnnounceHandler { in_memory_torrent_repository: Arc, /// Repository for persistent torrent data (database). - db_torrent_repository: Arc, + db_downloads_metric_repository: Arc, } impl AnnounceHandler { @@ -125,13 +125,13 @@ impl AnnounceHandler { config: &Core, whitelist_authorization: &Arc, in_memory_torrent_repository: &Arc, - db_torrent_repository: &Arc, + db_downloads_metric_repository: &Arc, ) -> Self { Self { whitelist_authorization: whitelist_authorization.clone(), config: config.clone(), in_memory_torrent_repository: in_memory_torrent_repository.clone(), - db_torrent_repository: db_torrent_repository.clone(), + db_downloads_metric_repository: db_downloads_metric_repository.clone(), } } @@ -169,7 +169,7 @@ impl AnnounceHandler { // downloads across all torrents. The in-memory metric will count only // the number of downloads during the current tracker uptime. let opt_persistent_torrent = if self.config.tracker_policy.persistent_torrent_completed_stat { - self.db_torrent_repository.load(info_hash)? + self.db_downloads_metric_repository.load(info_hash)? 
} else { None }; diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 8c6f360eb..4dd795e7a 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -31,7 +31,7 @@ pub struct TrackerCoreContainer { pub whitelist_authorization: Arc, pub whitelist_manager: Arc, pub in_memory_torrent_repository: Arc, - pub db_torrent_repository: Arc, + pub db_downloads_metric_repository: Arc, pub torrents_manager: Arc, pub stats_repository: Arc, } @@ -51,12 +51,12 @@ impl TrackerCoreContainer { &in_memory_key_repository.clone(), )); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(torrent_repository_container.swarms.clone())); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( core_config, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let stats_repository = Arc::new(statistics::repository::Repository::new()); @@ -65,7 +65,7 @@ impl TrackerCoreContainer { core_config, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); @@ -81,7 +81,7 @@ impl TrackerCoreContainer { whitelist_authorization, whitelist_manager, in_memory_torrent_repository, - db_torrent_repository, + db_downloads_metric_repository, torrents_manager, stats_repository, } diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index 028e7bc46..82c56abce 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -12,7 +12,7 @@ use 
crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; pub async fn handle_event( event: Event, stats_repository: &Arc, - db_torrent_repository: &Arc, + db_downloads_metric_repository: &Arc, persistent_torrent_completed_stat: bool, now: DurationSinceUnixEpoch, ) { @@ -53,7 +53,7 @@ pub async fn handle_event( if persistent_torrent_completed_stat { // Increment the number of downloads for the torrent in the database - match db_torrent_repository.increase_number_of_downloads(&info_hash) { + match db_downloads_metric_repository.increase_number_of_downloads(&info_hash) { Ok(()) => { tracing::debug!(info_hash = ?info_hash, "Number of torrent downloads increased"); } @@ -63,7 +63,7 @@ pub async fn handle_event( } // Increment the global number of downloads (for all torrents) in the database - match db_torrent_repository.increase_global_number_of_downloads() { + match db_downloads_metric_repository.increase_global_number_of_downloads() { Ok(()) => { tracing::debug!("Global number of downloads increased"); } diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index 63c75e2f6..f0d8cb7f1 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -14,11 +14,11 @@ use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; pub fn run_event_listener( receiver: Receiver, repository: &Arc, - db_torrent_repository: &Arc, + db_downloads_metric_repository: &Arc, persistent_torrent_completed_stat: bool, ) -> JoinHandle<()> { let stats_repository = repository.clone(); - let db_torrent_repository: Arc = db_torrent_repository.clone(); + let db_downloads_metric_repository: Arc = db_downloads_metric_repository.clone(); tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting torrent repository event listener"); @@ -26,7 +26,7 @@ pub fn run_event_listener( dispatch_events( receiver, stats_repository, - db_torrent_repository, + 
db_downloads_metric_repository, persistent_torrent_completed_stat, ) .await; @@ -38,7 +38,7 @@ pub fn run_event_listener( async fn dispatch_events( mut receiver: Receiver, stats_repository: Arc, - db_torrent_repository: Arc, + db_downloads_metric_repository: Arc, persistent_torrent_completed_stat: bool, ) { let shutdown_signal = tokio::signal::ctrl_c(); @@ -59,7 +59,7 @@ async fn dispatch_events( Ok(event) => handle_event( event, &stats_repository, - &db_torrent_repository, + &db_downloads_metric_repository, persistent_torrent_completed_stat, CurrentClock::now()).await, Err(e) => { diff --git a/packages/tracker-core/src/statistics/persisted_metrics.rs b/packages/tracker-core/src/statistics/persisted_metrics.rs index 73c52884e..55ec91b10 100644 --- a/packages/tracker-core/src/statistics/persisted_metrics.rs +++ b/packages/tracker-core/src/statistics/persisted_metrics.rs @@ -18,10 +18,10 @@ use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; /// metric collection fails to set the initial metric values. pub async fn load_persisted_metrics( stats_repository: &Arc, - db_torrent_repository: &Arc, + db_downloads_metric_repository: &Arc, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { - if let Some(downloads) = db_torrent_repository.load_global_number_of_downloads()? { + if let Some(downloads) = db_downloads_metric_repository.load_global_number_of_downloads()? 
{ stats_repository .set_counter( &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index 540381c75..f8b79e4db 100644 --- a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -137,13 +137,13 @@ pub(crate) mod tests { &in_memory_whitelist.clone(), )); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index dfcdaf38c..e18e19ce0 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -31,7 +31,7 @@ pub struct TorrentsManager { /// The persistent torrents repository. #[allow(dead_code)] - db_torrent_repository: Arc, + db_downloads_metric_repository: Arc, } impl TorrentsManager { @@ -42,7 +42,7 @@ impl TorrentsManager { /// * `config` - A reference to the tracker configuration. /// * `in_memory_torrent_repository` - A shared reference to the in-memory /// repository of torrents. - /// * `db_torrent_repository` - A shared reference to the persistent + /// * `db_downloads_metric_repository` - A shared reference to the persistent /// repository for torrent metrics. 
/// /// # Returns @@ -52,12 +52,12 @@ impl TorrentsManager { pub fn new( config: &Core, in_memory_torrent_repository: &Arc, - db_torrent_repository: &Arc, + db_downloads_metric_repository: &Arc, ) -> Self { Self { config: config.clone(), in_memory_torrent_repository: in_memory_torrent_repository.clone(), - db_torrent_repository: db_torrent_repository.clone(), + db_downloads_metric_repository: db_downloads_metric_repository.clone(), } } @@ -72,7 +72,7 @@ impl TorrentsManager { /// Returns a `databases::error::Error` if unable to load the persistent /// torrent data. pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { - let persistent_torrents = self.db_torrent_repository.load_all()?; + let persistent_torrents = self.db_downloads_metric_repository.load_all()?; println!("Loaded {} persistent torrents from the database", persistent_torrents.len()); diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 11a4d400a..88b363234 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -57,7 +57,7 @@ impl TestEnv { async fn load_persisted_metrics(&self, now: DurationSinceUnixEpoch) { load_persisted_metrics( &self.tracker_core_container.stats_repository, - &self.tracker_core_container.db_torrent_repository, + &self.tracker_core_container.db_downloads_metric_repository, now, ) .await @@ -77,7 +77,7 @@ impl TestEnv { let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( self.torrent_repository_container.event_bus.receiver(), &self.tracker_core_container.stats_repository, - &self.tracker_core_container.db_torrent_repository, + &self.tracker_core_container.db_downloads_metric_repository, self.tracker_core_container .core_config .tracker_policy diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 60788ab9c..38e136a12 100644 --- 
a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -885,7 +885,7 @@ mod tests { let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock @@ -923,7 +923,7 @@ mod tests { &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let request = AnnounceRequestBuilder::default() diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index eb51e6d01..9bbebd56e 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -275,12 +275,12 @@ pub(crate) mod tests { let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); diff --git a/src/app.rs b/src/app.rs index 571e034f5..ac51239fc 100644 --- a/src/app.rs 
+++ b/src/app.rs @@ -143,7 +143,7 @@ async fn load_torrent_metrics(config: &Configuration, app_container: &Arc Date: Tue, 27 May 2025 14:45:40 +0100 Subject: [PATCH 089/247] refactor: [#1541] create folder for mod More submods will be included inside. --- packages/tracker-core/src/statistics/mod.rs | 2 +- .../src/statistics/{persisted_metrics.rs => persisted/mod.rs} | 0 packages/tracker-core/tests/common/test_env.rs | 2 +- src/app.rs | 2 +- 4 files changed, 3 insertions(+), 3 deletions(-) rename packages/tracker-core/src/statistics/{persisted_metrics.rs => persisted/mod.rs} (100%) diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs index 89d6b79d5..ff8187379 100644 --- a/packages/tracker-core/src/statistics/mod.rs +++ b/packages/tracker-core/src/statistics/mod.rs @@ -1,6 +1,6 @@ pub mod event; pub mod metrics; -pub mod persisted_metrics; +pub mod persisted; pub mod repository; use metrics::Metrics; diff --git a/packages/tracker-core/src/statistics/persisted_metrics.rs b/packages/tracker-core/src/statistics/persisted/mod.rs similarity index 100% rename from packages/tracker-core/src/statistics/persisted_metrics.rs rename to packages/tracker-core/src/statistics/persisted/mod.rs diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 88b363234..2aafbbbad 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -5,7 +5,7 @@ use aquatic_udp_protocol::AnnounceEvent; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::PeersWanted; use bittorrent_tracker_core::container::TrackerCoreContainer; -use bittorrent_tracker_core::statistics::persisted_metrics::load_persisted_metrics; +use bittorrent_tracker_core::statistics::persisted::load_persisted_metrics; use tokio::task::yield_now; use torrust_tracker_configuration::Core; use torrust_tracker_metrics::label::LabelSet; 
diff --git a/src/app.rs b/src/app.rs index ac51239fc..c31281829 100644 --- a/src/app.rs +++ b/src/app.rs @@ -141,7 +141,7 @@ fn load_torrents_from_database(config: &Configuration, app_container: &Arc) { if config.core.tracker_policy.persistent_torrent_completed_stat { - bittorrent_tracker_core::statistics::persisted_metrics::load_persisted_metrics( + bittorrent_tracker_core::statistics::persisted::load_persisted_metrics( &app_container.tracker_core_container.stats_repository, &app_container.tracker_core_container.db_downloads_metric_repository, CurrentClock::now(), From fdbea0aa85dcef20561e7e363232eea00e3d4f6b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 14:47:32 +0100 Subject: [PATCH 090/247] refactor: [#1541] rename mod --- packages/axum-http-tracker-server/src/v1/handlers/announce.rs | 2 +- packages/http-tracker-core/benches/helpers/util.rs | 2 +- packages/http-tracker-core/src/services/announce.rs | 2 +- packages/http-tracker-core/src/services/scrape.rs | 2 +- packages/tracker-core/src/announce_handler.rs | 2 +- packages/tracker-core/src/container.rs | 2 +- packages/tracker-core/src/statistics/event/handler.rs | 2 +- packages/tracker-core/src/statistics/event/listener.rs | 2 +- packages/tracker-core/src/statistics/persisted/mod.rs | 2 +- packages/tracker-core/src/test_helpers.rs | 2 +- packages/tracker-core/src/torrent/manager.rs | 2 +- .../src/torrent/repository/{persisted.rs => downloads.rs} | 0 packages/tracker-core/src/torrent/repository/mod.rs | 2 +- packages/udp-tracker-server/src/handlers/announce.rs | 2 +- packages/udp-tracker-server/src/handlers/mod.rs | 2 +- 15 files changed, 14 insertions(+), 14 deletions(-) rename packages/tracker-core/src/torrent/repository/{persisted.rs => downloads.rs} (100%) diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 108ebb33f..68e0825f4 100644 --- 
a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -120,7 +120,7 @@ mod tests { use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; + use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::Configuration; diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 06c20543e..2798203ae 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -15,7 +15,7 @@ use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemor use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; +use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 7831324f0..7f3e553e4 100644 --- a/packages/http-tracker-core/src/services/announce.rs 
+++ b/packages/http-tracker-core/src/services/announce.rs @@ -213,7 +213,7 @@ mod tests { use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; + use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::{Configuration, Core}; diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 0261626a9..f10f00732 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -177,7 +177,7 @@ mod tests { use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; + use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 847ddd1af..9a1c92efa 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -99,7 +99,7 @@ use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; use 
super::torrent::repository::in_memory::InMemoryTorrentRepository; -use super::torrent::repository::persisted::DatabaseDownloadsMetricRepository; +use super::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use crate::error::AnnounceError; use crate::whitelist::authorization::WhitelistAuthorization; diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 4dd795e7a..b2bcdebb3 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -13,7 +13,7 @@ use crate::databases::Database; use crate::scrape_handler::ScrapeHandler; use crate::torrent::manager::TorrentsManager; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; +use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use crate::whitelist::authorization::WhitelistAuthorization; use crate::whitelist::manager::WhitelistManager; use crate::whitelist::repository::in_memory::InMemoryWhitelist; diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index 82c56abce..028f32030 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -7,7 +7,7 @@ use torrust_tracker_torrent_repository::event::Event; use crate::statistics::repository::Repository; use crate::statistics::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; -use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; +use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; pub async fn handle_event( event: Event, diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index f0d8cb7f1..23b6e648a 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ 
b/packages/tracker-core/src/statistics/event/listener.rs @@ -7,7 +7,7 @@ use torrust_tracker_torrent_repository::event::receiver::Receiver; use super::handler::handle_event; use crate::statistics::repository::Repository; -use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; +use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; #[must_use] diff --git a/packages/tracker-core/src/statistics/persisted/mod.rs b/packages/tracker-core/src/statistics/persisted/mod.rs index 55ec91b10..4475f9647 100644 --- a/packages/tracker-core/src/statistics/persisted/mod.rs +++ b/packages/tracker-core/src/statistics/persisted/mod.rs @@ -8,7 +8,7 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::Repository; use super::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; use crate::databases; -use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; +use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; /// Loads persisted metrics from the database and sets them in the stats repository. 
/// diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index f8b79e4db..c10d3dd3e 100644 --- a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -20,7 +20,7 @@ pub(crate) mod tests { use crate::databases::setup::initialize_database; use crate::scrape_handler::ScrapeHandler; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; + use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use crate::whitelist::repository::in_memory::InMemoryWhitelist; use crate::whitelist::{self}; diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index e18e19ce0..f86e9442e 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::Core; use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::in_memory::InMemoryTorrentRepository; -use super::repository::persisted::DatabaseDownloadsMetricRepository; +use super::repository::downloads::DatabaseDownloadsMetricRepository; use crate::{databases, CurrentClock}; /// The `TorrentsManager` is responsible for managing torrent entries by diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/downloads.rs similarity index 100% rename from packages/tracker-core/src/torrent/repository/persisted.rs rename to packages/tracker-core/src/torrent/repository/downloads.rs diff --git a/packages/tracker-core/src/torrent/repository/mod.rs b/packages/tracker-core/src/torrent/repository/mod.rs index ae789e5e9..fd0382025 100644 --- a/packages/tracker-core/src/torrent/repository/mod.rs +++ b/packages/tracker-core/src/torrent/repository/mod.rs @@ -1,3 +1,3 @@ //! Torrent repository implementations. 
pub mod in_memory; -pub mod persisted; +pub mod downloads; diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 38e136a12..555d047d0 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -836,7 +836,7 @@ mod tests { use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; + use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 9bbebd56e..3957f63c3 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -212,7 +212,7 @@ pub(crate) mod tests { use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; + use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; From 7e27d31bcfff7b5653adc6df99e9b87caf8eed59 Mon Sep 17 00:00:00 
2001 From: Jose Celano Date: Tue, 27 May 2025 14:52:30 +0100 Subject: [PATCH 091/247] refactor: [#1541] move mod --- packages/axum-http-tracker-server/src/v1/handlers/announce.rs | 2 +- packages/http-tracker-core/benches/helpers/util.rs | 2 +- packages/http-tracker-core/src/services/announce.rs | 2 +- packages/http-tracker-core/src/services/scrape.rs | 2 +- packages/tracker-core/src/announce_handler.rs | 2 +- packages/tracker-core/src/container.rs | 2 +- packages/tracker-core/src/statistics/event/handler.rs | 2 +- packages/tracker-core/src/statistics/event/listener.rs | 2 +- .../{torrent/repository => statistics/persisted}/downloads.rs | 0 packages/tracker-core/src/statistics/persisted/mod.rs | 4 +++- packages/tracker-core/src/test_helpers.rs | 2 +- packages/tracker-core/src/torrent/manager.rs | 2 +- packages/tracker-core/src/torrent/repository/mod.rs | 1 - packages/udp-tracker-server/src/handlers/announce.rs | 2 +- packages/udp-tracker-server/src/handlers/mod.rs | 2 +- 15 files changed, 15 insertions(+), 14 deletions(-) rename packages/tracker-core/src/{torrent/repository => statistics/persisted}/downloads.rs (100%) diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 68e0825f4..16ff83f81 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -119,8 +119,8 @@ mod tests { use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use 
bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::Configuration; diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 2798203ae..414d3b40e 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -14,8 +14,8 @@ use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; +use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 7f3e553e4..23d589bce 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -212,8 +212,8 @@ mod tests { use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; + use 
bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::{Configuration, Core}; diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index f10f00732..1445ffcfe 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -176,8 +176,8 @@ mod tests { use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 9a1c92efa..501993ad5 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -99,8 +99,8 @@ use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; use super::torrent::repository::in_memory::InMemoryTorrentRepository; -use super::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use 
crate::error::AnnounceError; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::whitelist::authorization::WhitelistAuthorization; /// Handles `announce` requests from `BitTorrent` clients. diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index b2bcdebb3..02af67118 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -11,9 +11,9 @@ use crate::authentication::service::AuthenticationService; use crate::databases::setup::initialize_database; use crate::databases::Database; use crate::scrape_handler::ScrapeHandler; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::torrent::manager::TorrentsManager; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use crate::whitelist::authorization::WhitelistAuthorization; use crate::whitelist::manager::WhitelistManager; use crate::whitelist::repository::in_memory::InMemoryWhitelist; diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index 028f32030..0001b43ce 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -5,9 +5,9 @@ use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_torrent_repository::event::Event; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::statistics::repository::Repository; use crate::statistics::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; -use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; pub async fn handle_event( event: Event, diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index 
23b6e648a..2702aa858 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -6,8 +6,8 @@ use torrust_tracker_events::receiver::RecvError; use torrust_tracker_torrent_repository::event::receiver::Receiver; use super::handler::handle_event; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::statistics::repository::Repository; -use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; #[must_use] diff --git a/packages/tracker-core/src/torrent/repository/downloads.rs b/packages/tracker-core/src/statistics/persisted/downloads.rs similarity index 100% rename from packages/tracker-core/src/torrent/repository/downloads.rs rename to packages/tracker-core/src/statistics/persisted/downloads.rs diff --git a/packages/tracker-core/src/statistics/persisted/mod.rs b/packages/tracker-core/src/statistics/persisted/mod.rs index 4475f9647..f675b4ebc 100644 --- a/packages/tracker-core/src/statistics/persisted/mod.rs +++ b/packages/tracker-core/src/statistics/persisted/mod.rs @@ -1,3 +1,5 @@ +pub mod downloads; + use std::sync::Arc; use thiserror::Error; @@ -8,7 +10,7 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::Repository; use super::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; use crate::databases; -use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; /// Loads persisted metrics from the database and sets them in the stats repository. 
/// diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index c10d3dd3e..62649cd22 100644 --- a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -19,8 +19,8 @@ pub(crate) mod tests { use crate::announce_handler::AnnounceHandler; use crate::databases::setup::initialize_database; use crate::scrape_handler::ScrapeHandler; + use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use crate::whitelist::repository::in_memory::InMemoryWhitelist; use crate::whitelist::{self}; diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index f86e9442e..b7c6d5117 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::Core; use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::in_memory::InMemoryTorrentRepository; -use super::repository::downloads::DatabaseDownloadsMetricRepository; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::{databases, CurrentClock}; /// The `TorrentsManager` is responsible for managing torrent entries by diff --git a/packages/tracker-core/src/torrent/repository/mod.rs b/packages/tracker-core/src/torrent/repository/mod.rs index fd0382025..d8325dec5 100644 --- a/packages/tracker-core/src/torrent/repository/mod.rs +++ b/packages/tracker-core/src/torrent/repository/mod.rs @@ -1,3 +1,2 @@ //! Torrent repository implementations. 
pub mod in_memory; -pub mod downloads; diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 555d047d0..2fc3f6e63 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -835,8 +835,8 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 3957f63c3..df550ab72 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -211,8 +211,8 @@ pub(crate) mod tests { use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use 
bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; From 0508a6a11e6050715a712005384c65659bfecf4e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 15:04:51 +0100 Subject: [PATCH 092/247] refactor: [#1541] rename methods --- packages/tracker-core/src/announce_handler.rs | 2 +- .../tracker-core/src/databases/driver/mod.rs | 28 +++++------ .../src/databases/driver/mysql.rs | 14 +++--- .../src/databases/driver/sqlite.rs | 14 +++--- packages/tracker-core/src/databases/mod.rs | 14 +++--- .../src/statistics/event/handler.rs | 4 +- .../src/statistics/persisted/downloads.rs | 46 +++++++++---------- .../src/statistics/persisted/mod.rs | 2 +- packages/tracker-core/src/torrent/manager.rs | 12 ++--- 9 files changed, 68 insertions(+), 68 deletions(-) diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 501993ad5..a6614361a 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -169,7 +169,7 @@ impl AnnounceHandler { // downloads across all torrents. The in-memory metric will count only // the number of downloads during the current tracker uptime. let opt_persistent_torrent = if self.config.tracker_policy.persistent_torrent_completed_stat { - self.db_downloads_metric_repository.load(info_hash)? + self.db_downloads_metric_repository.load_torrent_downloads(info_hash)? 
} else { None }; diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index e8f0ecbfb..6c849bb70 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -169,9 +169,9 @@ pub(crate) mod tests { let number_of_downloads = 1; - driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); + driver.save_torrent_downloads(&infohash, number_of_downloads).unwrap(); - let number_of_downloads = driver.load_persistent_torrent(&infohash).unwrap().unwrap(); + let number_of_downloads = driver.load_torrent_downloads(&infohash).unwrap().unwrap(); assert_eq!(number_of_downloads, 1); } @@ -181,9 +181,9 @@ pub(crate) mod tests { let number_of_downloads = 1; - driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); + driver.save_torrent_downloads(&infohash, number_of_downloads).unwrap(); - let torrents = driver.load_persistent_torrents().unwrap(); + let torrents = driver.load_all_torrents_downloads().unwrap(); assert_eq!(torrents.len(), 1); assert_eq!(torrents.get(&infohash), Some(number_of_downloads).as_ref()); @@ -194,11 +194,11 @@ pub(crate) mod tests { let number_of_downloads = 1; - driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); + driver.save_torrent_downloads(&infohash, number_of_downloads).unwrap(); - driver.increase_number_of_downloads(&infohash).unwrap(); + driver.increase_downloads_for_torrent(&infohash).unwrap(); - let number_of_downloads = driver.load_persistent_torrent(&infohash).unwrap().unwrap(); + let number_of_downloads = driver.load_torrent_downloads(&infohash).unwrap().unwrap(); assert_eq!(number_of_downloads, 2); } @@ -208,9 +208,9 @@ pub(crate) mod tests { pub fn it_should_save_and_load_the_global_number_of_downloads(driver: &Arc>) { let number_of_downloads = 1; - driver.save_global_number_of_downloads(number_of_downloads).unwrap(); + 
driver.save_global_downloads(number_of_downloads).unwrap(); - let number_of_downloads = driver.load_global_number_of_downloads().unwrap().unwrap(); + let number_of_downloads = driver.load_global_downloads().unwrap().unwrap(); assert_eq!(number_of_downloads, 1); } @@ -218,9 +218,9 @@ pub(crate) mod tests { pub fn it_should_load_the_global_number_of_downloads(driver: &Arc>) { let number_of_downloads = 1; - driver.save_global_number_of_downloads(number_of_downloads).unwrap(); + driver.save_global_downloads(number_of_downloads).unwrap(); - let number_of_downloads = driver.load_global_number_of_downloads().unwrap().unwrap(); + let number_of_downloads = driver.load_global_downloads().unwrap().unwrap(); assert_eq!(number_of_downloads, 1); } @@ -228,11 +228,11 @@ pub(crate) mod tests { pub fn it_should_increase_the_global_number_of_downloads(driver: &Arc>) { let number_of_downloads = 1; - driver.save_global_number_of_downloads(number_of_downloads).unwrap(); + driver.save_global_downloads(number_of_downloads).unwrap(); - driver.increase_global_number_of_downloads().unwrap(); + driver.increase_global_downloads().unwrap(); - let number_of_downloads = driver.load_global_number_of_downloads().unwrap().unwrap(); + let number_of_downloads = driver.load_global_downloads().unwrap().unwrap(); assert_eq!(number_of_downloads, 2); } diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index bfbc47ebd..ce76ce563 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -146,7 +146,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). 
- fn load_persistent_torrents(&self) -> Result { + fn load_all_torrents_downloads(&self) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let torrents = conn.query_map( @@ -161,7 +161,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_persistent_torrent`](crate::core::databases::Database::load_persistent_torrent). - fn load_persistent_torrent(&self, info_hash: &InfoHash) -> Result, Error> { + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let query = conn.exec_first::( @@ -175,7 +175,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). - fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + fn save_torrent_downloads(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { const COMMAND : &str = "INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)"; let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -186,7 +186,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::increase_number_of_downloads`](crate::core::databases::Database::increase_number_of_downloads). - fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error> { + fn increase_downloads_for_torrent(&self, info_hash: &InfoHash) -> Result<(), Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hash_str = info_hash.to_string(); @@ -200,17 +200,17 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_global_number_of_downloads`](crate::core::databases::Database::load_global_number_of_downloads). 
- fn load_global_number_of_downloads(&self) -> Result, Error> { + fn load_global_downloads(&self) -> Result, Error> { self.load_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL) } /// Refer to [`databases::Database::save_global_number_of_downloads`](crate::core::databases::Database::save_global_number_of_downloads). - fn save_global_number_of_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { + fn save_global_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { self.save_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL, downloaded) } /// Refer to [`databases::Database::increase_global_number_of_downloads`](crate::core::databases::Database::increase_global_number_of_downloads). - fn increase_global_number_of_downloads(&self) -> Result<(), Error> { + fn increase_global_downloads(&self) -> Result<(), Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let metric_name = TORRENTS_DOWNLOADS_TOTAL; diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index 91e969233..794f65a4c 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -152,7 +152,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). - fn load_persistent_torrents(&self) -> Result { + fn load_all_torrents_downloads(&self) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; @@ -168,7 +168,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_persistent_torrent`](crate::core::databases::Database::load_persistent_torrent). 
- fn load_persistent_torrent(&self, info_hash: &InfoHash) -> Result, Error> { + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT completed FROM torrents WHERE info_hash = ?")?; @@ -184,7 +184,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). - fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + fn save_torrent_downloads(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let insert = conn.execute( @@ -203,7 +203,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::increase_number_of_downloads`](crate::core::databases::Database::increase_number_of_downloads). - fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error> { + fn increase_downloads_for_torrent(&self, info_hash: &InfoHash) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let _ = conn.execute( @@ -215,17 +215,17 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_global_number_of_downloads`](crate::core::databases::Database::load_global_number_of_downloads). - fn load_global_number_of_downloads(&self) -> Result, Error> { + fn load_global_downloads(&self) -> Result, Error> { self.load_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL) } /// Refer to [`databases::Database::save_global_number_of_downloads`](crate::core::databases::Database::save_global_number_of_downloads). 
- fn save_global_number_of_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { + fn save_global_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { self.save_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL, downloaded) } /// Refer to [`databases::Database::increase_global_number_of_downloads`](crate::core::databases::Database::increase_global_number_of_downloads). - fn increase_global_number_of_downloads(&self) -> Result<(), Error> { + fn increase_global_downloads(&self) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let metric_name = TORRENTS_DOWNLOADS_TOTAL; diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index a9d6b2a22..b637219ad 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -101,7 +101,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the metrics cannot be loaded. - fn load_persistent_torrents(&self) -> Result; + fn load_all_torrents_downloads(&self) -> Result; /// Loads torrent metrics data from the database for one torrent. /// @@ -110,7 +110,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the metrics cannot be loaded. - fn load_persistent_torrent(&self, info_hash: &InfoHash) -> Result, Error>; + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error>; /// Saves torrent metrics data into the database. /// @@ -124,7 +124,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the metrics cannot be saved. - fn save_persistent_torrent(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error>; + fn save_torrent_downloads(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error>; /// Increases the number of downloads for a given torrent. 
/// @@ -140,7 +140,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the query failed. - fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error>; + fn increase_downloads_for_torrent(&self, info_hash: &InfoHash) -> Result<(), Error>; /// Loads the total number of downloads for all torrents from the database. /// @@ -149,7 +149,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the total downloads cannot be loaded. - fn load_global_number_of_downloads(&self) -> Result, Error>; + fn load_global_downloads(&self) -> Result, Error>; /// Saves the total number of downloads for all torrents into the database. /// @@ -163,7 +163,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the total downloads cannot be saved. - fn save_global_number_of_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error>; + fn save_global_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error>; /// Increases the total number of downloads for all torrents. /// @@ -172,7 +172,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the query failed. 
- fn increase_global_number_of_downloads(&self) -> Result<(), Error>; + fn increase_global_downloads(&self) -> Result<(), Error>; // Whitelist diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index 0001b43ce..0909dc184 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -53,7 +53,7 @@ pub async fn handle_event( if persistent_torrent_completed_stat { // Increment the number of downloads for the torrent in the database - match db_downloads_metric_repository.increase_number_of_downloads(&info_hash) { + match db_downloads_metric_repository.increase_downloads_for_torrent(&info_hash) { Ok(()) => { tracing::debug!(info_hash = ?info_hash, "Number of torrent downloads increased"); } @@ -63,7 +63,7 @@ pub async fn handle_event( } // Increment the global number of downloads (for all torrents) in the database - match db_downloads_metric_repository.increase_global_number_of_downloads() { + match db_downloads_metric_repository.increase_global_downloads() { Ok(()) => { tracing::debug!("Global number of downloads increased"); } diff --git a/packages/tracker-core/src/statistics/persisted/downloads.rs b/packages/tracker-core/src/statistics/persisted/downloads.rs index d6c6ce263..7edaf73d8 100644 --- a/packages/tracker-core/src/statistics/persisted/downloads.rs +++ b/packages/tracker-core/src/statistics/persisted/downloads.rs @@ -60,12 +60,12 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the database operation fails. 
- pub(crate) fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error> { - let torrent = self.load(info_hash)?; + pub(crate) fn increase_downloads_for_torrent(&self, info_hash: &InfoHash) -> Result<(), Error> { + let torrent = self.load_torrent_downloads(info_hash)?; match torrent { - Some(_number_of_downloads) => self.database.increase_number_of_downloads(info_hash), - None => self.save(info_hash, 1), + Some(_number_of_downloads) => self.database.increase_downloads_for_torrent(info_hash), + None => self.save_torrent_downloads(info_hash, 1), } } @@ -77,8 +77,8 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. - pub(crate) fn load_all(&self) -> Result { - self.database.load_persistent_torrents() + pub(crate) fn load_all_torrents_downloads(&self) -> Result { + self.database.load_all_torrents_downloads() } /// Loads one persistent torrent metrics from the database. @@ -89,8 +89,8 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. - pub(crate) fn load(&self, info_hash: &InfoHash) -> Result, Error> { - self.database.load_persistent_torrent(info_hash) + pub(crate) fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { + self.database.load_torrent_downloads(info_hash) } /// Saves the persistent torrent metric into the database. @@ -106,8 +106,8 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the database operation fails. 
- pub(crate) fn save(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> { - self.database.save_persistent_torrent(info_hash, downloaded) + pub(crate) fn save_torrent_downloads(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> { + self.database.save_torrent_downloads(info_hash, downloaded) } // Aggregate Metrics @@ -119,12 +119,12 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the database operation fails. - pub(crate) fn increase_global_number_of_downloads(&self) -> Result<(), Error> { - let torrent = self.database.load_global_number_of_downloads()?; + pub(crate) fn increase_global_downloads(&self) -> Result<(), Error> { + let torrent = self.database.load_global_downloads()?; match torrent { - Some(_number_of_downloads) => self.database.increase_global_number_of_downloads(), - None => self.database.save_global_number_of_downloads(1), + Some(_number_of_downloads) => self.database.increase_global_downloads(), + None => self.database.save_global_downloads(1), } } @@ -133,8 +133,8 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. 
- pub(crate) fn load_global_number_of_downloads(&self) -> Result, Error> { - self.database.load_global_number_of_downloads() + pub(crate) fn load_global_downloads(&self) -> Result, Error> { + self.database.load_global_downloads() } } @@ -159,9 +159,9 @@ mod tests { let infohash = sample_info_hash(); - repository.save(&infohash, 1).unwrap(); + repository.save_torrent_downloads(&infohash, 1).unwrap(); - let torrents = repository.load_all().unwrap(); + let torrents = repository.load_all_torrents_downloads().unwrap(); assert_eq!(torrents.get(&infohash), Some(1).as_ref()); } @@ -172,9 +172,9 @@ mod tests { let infohash = sample_info_hash(); - repository.increase_number_of_downloads(&infohash).unwrap(); + repository.increase_downloads_for_torrent(&infohash).unwrap(); - let torrents = repository.load_all().unwrap(); + let torrents = repository.load_all_torrents_downloads().unwrap(); assert_eq!(torrents.get(&infohash), Some(1).as_ref()); } @@ -186,10 +186,10 @@ mod tests { let infohash_one = sample_info_hash_one(); let infohash_two = sample_info_hash_two(); - repository.save(&infohash_one, 1).unwrap(); - repository.save(&infohash_two, 2).unwrap(); + repository.save_torrent_downloads(&infohash_one, 1).unwrap(); + repository.save_torrent_downloads(&infohash_two, 2).unwrap(); - let torrents = repository.load_all().unwrap(); + let torrents = repository.load_all_torrents_downloads().unwrap(); let mut expected_torrents = PersistentTorrents::new(); expected_torrents.insert(infohash_one, 1); diff --git a/packages/tracker-core/src/statistics/persisted/mod.rs b/packages/tracker-core/src/statistics/persisted/mod.rs index f675b4ebc..86c28370d 100644 --- a/packages/tracker-core/src/statistics/persisted/mod.rs +++ b/packages/tracker-core/src/statistics/persisted/mod.rs @@ -23,7 +23,7 @@ pub async fn load_persisted_metrics( db_downloads_metric_repository: &Arc, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { - if let Some(downloads) = 
db_downloads_metric_repository.load_global_number_of_downloads()? { + if let Some(downloads) = db_downloads_metric_repository.load_global_downloads()? { stats_repository .set_counter( &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index b7c6d5117..766fa5c4a 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -29,8 +29,7 @@ pub struct TorrentsManager { /// The in-memory torrents repository. in_memory_torrent_repository: Arc, - /// The persistent torrents repository. - #[allow(dead_code)] + /// The download metrics repository. db_downloads_metric_repository: Arc, } @@ -72,9 +71,7 @@ impl TorrentsManager { /// Returns a `databases::error::Error` if unable to load the persistent /// torrent data. pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { - let persistent_torrents = self.db_downloads_metric_repository.load_all()?; - - println!("Loaded {} persistent torrents from the database", persistent_torrents.len()); + let persistent_torrents = self.db_downloads_metric_repository.load_all_torrents_downloads()?; self.in_memory_torrent_repository.import_persistent(&persistent_torrents); @@ -197,7 +194,10 @@ mod tests { let infohash = sample_info_hash(); - services.database_persistent_torrent_repository.save(&infohash, 1).unwrap(); + services + .database_persistent_torrent_repository + .save_torrent_downloads(&infohash, 1) + .unwrap(); torrents_manager.load_torrents_from_database().unwrap(); From a5a80b5de923957eaee81c474110ea443b2cd5a6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 15:09:40 +0100 Subject: [PATCH 093/247] refactor: [#1541] rename type alias PersistentTorrent to NumberOfDownloads --- packages/primitives/src/lib.rs | 4 ++-- .../src/repository/dash_map_mutex_std.rs | 4 ++-- .../src/repository/mod.rs | 6 +++--- 
.../src/repository/rw_lock_std.rs | 4 ++-- .../src/repository/rw_lock_std_mutex_std.rs | 4 ++-- .../src/repository/rw_lock_std_mutex_tokio.rs | 4 ++-- .../src/repository/rw_lock_tokio.rs | 4 ++-- .../src/repository/rw_lock_tokio_mutex_std.rs | 4 ++-- .../src/repository/rw_lock_tokio_mutex_tokio.rs | 4 ++-- .../src/repository/skip_map_mutex_std.rs | 8 ++++---- .../tests/common/repo.rs | 4 ++-- packages/torrent-repository/src/swarms.rs | 4 ++-- packages/tracker-core/src/databases/driver/mysql.rs | 12 ++++++------ packages/tracker-core/src/databases/driver/sqlite.rs | 12 ++++++------ packages/tracker-core/src/databases/mod.rs | 8 ++++---- .../src/statistics/persisted/downloads.rs | 6 +++--- .../tracker-core/src/torrent/repository/in_memory.rs | 4 ++-- 17 files changed, 48 insertions(+), 48 deletions(-) diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index c901e5276..b04991eb8 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -18,5 +18,5 @@ use bittorrent_primitives::info_hash::InfoHash; /// Duration since the Unix Epoch. 
pub type DurationSinceUnixEpoch = Duration; -pub type PersistentTorrent = u32; -pub type PersistentTorrents = BTreeMap; +pub type NumberOfDownloads = u32; +pub type PersistentTorrents = BTreeMap; diff --git a/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs index d4a84caa0..c0ef455d4 100644 --- a/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs @@ -5,7 +5,7 @@ use dashmap::DashMap; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -22,7 +22,7 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { // todo: load persistent torrent data if provided if let Some(entry) = self.torrents.get(info_hash) { diff --git a/packages/torrent-repository-benchmarking/src/repository/mod.rs b/packages/torrent-repository-benchmarking/src/repository/mod.rs index 9284ff6e6..2ad7a3927 100644 --- a/packages/torrent-repository-benchmarking/src/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/src/repository/mod.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use 
torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; pub mod dash_map_mutex_std; pub mod rw_lock_std; @@ -23,7 +23,7 @@ pub trait Repository: Debug + Default + Sized + 'static { fn remove(&self, key: &InfoHash) -> Option; fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); fn remove_peerless_torrents(&self, policy: &TrackerPolicy); - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool; + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool; fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option; } @@ -40,7 +40,7 @@ pub trait RepositoryAsync: Debug + Default + Sized + 'static { &self, info_hash: &InfoHash, peer: &peer::Peer, - opt_persistent_torrent: Option, + opt_persistent_torrent: Option, ) -> impl std::future::Future + Send; fn get_swarm_metadata(&self, info_hash: &InfoHash) -> impl std::future::Future> + Send; } diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs index d190718af..c0e4d5cf5 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, 
PersistentTorrents}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -45,7 +45,7 @@ impl Repository for TorrentsRwLockStd where EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { // todo: load persistent torrent data if provided let mut db = self.get_torrents_mut(); diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs index 1764b94e8..30aabc799 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -32,7 +32,7 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { // todo: load persistent torrent data if provided let maybe_entry = self.get_torrents().get(info_hash).cloned(); diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs index 116c1ff87..f56322654 
100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs @@ -8,7 +8,7 @@ use futures::{Future, FutureExt}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -40,7 +40,7 @@ where &self, info_hash: &InfoHash, peer: &peer::Peer, - _opt_persistent_torrent: Option, + _opt_persistent_torrent: Option, ) -> bool { // todo: load persistent torrent data if provided diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs index 53838023d..091ff303d 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -50,7 +50,7 @@ where &self, info_hash: &InfoHash, peer: &peer::Peer, - _opt_persistent_torrent: Option, + _opt_persistent_torrent: Option, ) -> bool { // todo: load persistent torrent data if provided 
diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs index eb7e300fd..542ad7f0a 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -38,7 +38,7 @@ where &self, info_hash: &InfoHash, peer: &peer::Peer, - _opt_persistent_torrent: Option, + _opt_persistent_torrent: Option, ) -> bool { // todo: load persistent torrent data if provided diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs index c8ebaf4d6..2551972b3 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; 
use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -38,7 +38,7 @@ where &self, info_hash: &InfoHash, peer: &peer::Peer, - _opt_persistent_torrent: Option, + _opt_persistent_torrent: Option, ) -> bool { // todo: load persistent torrent data if provided diff --git a/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs index 8a15a9442..7d141facb 100644 --- a/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs @@ -5,7 +5,7 @@ use crossbeam_skiplist::SkipMap; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -38,7 +38,7 @@ where /// /// Returns `true` if the number of downloads was increased because the peer /// completed the download. 
- fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool { if let Some(existing_entry) = self.torrents.get(info_hash) { existing_entry.value().upsert_peer(peer) } else { @@ -146,7 +146,7 @@ where EntryRwLockParkingLot: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { // todo: load persistent torrent data if provided let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); @@ -239,7 +239,7 @@ where EntryMutexParkingLot: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { // todo: load persistent torrent data if provided let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); diff --git a/packages/torrent-repository-benchmarking/tests/common/repo.rs b/packages/torrent-repository-benchmarking/tests/common/repo.rs index 6c5c6ff77..3371e3c64 100644 --- a/packages/torrent-repository-benchmarking/tests/common/repo.rs +++ b/packages/torrent-repository-benchmarking/tests/common/repo.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use 
torrust_tracker_torrent_repository_benchmarking::repository::{Repository as _, RepositoryAsync as _}; use torrust_tracker_torrent_repository_benchmarking::{ EntrySingle, TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, @@ -29,7 +29,7 @@ impl Repo { &self, info_hash: &InfoHash, peer: &peer::Peer, - opt_persistent_torrent: Option, + opt_persistent_torrent: Option, ) -> bool { match self { Repo::RwLockStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 1504ac1f4..9c1f3d9b2 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -7,7 +7,7 @@ use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use crate::event::sender::Sender; use crate::event::Event; @@ -53,7 +53,7 @@ impl Swarms { &self, info_hash: &InfoHash, peer: &peer::Peer, - opt_persistent_torrent: Option, + opt_persistent_torrent: Option, ) -> Result<(), Error> { let swarm_handle = match self.swarms.get(info_hash) { None => { diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index ce76ce563..a5dfc50e5 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -13,7 +13,7 @@ use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; -use 
torrust_tracker_primitives::{PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{NumberOfDownloads, PersistentTorrents}; use super::{Database, Driver, Error, TORRENTS_DOWNLOADS_TOTAL}; use crate::authentication::key::AUTH_KEY_LENGTH; @@ -47,7 +47,7 @@ impl Mysql { Ok(Self { pool }) } - fn load_torrent_aggregate_metric(&self, metric_name: &str) -> Result, Error> { + fn load_torrent_aggregate_metric(&self, metric_name: &str) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let query = conn.exec_first::( @@ -60,7 +60,7 @@ impl Mysql { Ok(persistent_torrent) } - fn save_torrent_aggregate_metric(&self, metric_name: &str, completed: PersistentTorrent) -> Result<(), Error> { + fn save_torrent_aggregate_metric(&self, metric_name: &str, completed: NumberOfDownloads) -> Result<(), Error> { const COMMAND : &str = "INSERT INTO torrent_aggregate_metrics (metric_name, value) VALUES (:metric_name, :completed) ON DUPLICATE KEY UPDATE value = VALUES(value)"; let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -161,7 +161,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_persistent_torrent`](crate::core::databases::Database::load_persistent_torrent). - fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let query = conn.exec_first::( @@ -200,12 +200,12 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_global_number_of_downloads`](crate::core::databases::Database::load_global_number_of_downloads). - fn load_global_downloads(&self) -> Result, Error> { + fn load_global_downloads(&self) -> Result, Error> { self.load_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL) } /// Refer to [`databases::Database::save_global_number_of_downloads`](crate::core::databases::Database::save_global_number_of_downloads). 
- fn save_global_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { + fn save_global_downloads(&self, downloaded: NumberOfDownloads) -> Result<(), Error> { self.save_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL, downloaded) } diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index 794f65a4c..d4b6a82c6 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -13,7 +13,7 @@ use r2d2::Pool; use r2d2_sqlite::rusqlite::params; use r2d2_sqlite::rusqlite::types::Null; use r2d2_sqlite::SqliteConnectionManager; -use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use super::{Database, Driver, Error, TORRENTS_DOWNLOADS_TOTAL}; use crate::authentication::{self, Key}; @@ -50,7 +50,7 @@ impl Sqlite { Ok(Self { pool }) } - fn load_torrent_aggregate_metric(&self, metric_name: &str) -> Result, Error> { + fn load_torrent_aggregate_metric(&self, metric_name: &str) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT value FROM torrent_aggregate_metrics WHERE metric_name = ?")?; @@ -65,7 +65,7 @@ impl Sqlite { })) } - fn save_torrent_aggregate_metric(&self, metric_name: &str, completed: PersistentTorrent) -> Result<(), Error> { + fn save_torrent_aggregate_metric(&self, metric_name: &str, completed: NumberOfDownloads) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let insert = conn.execute( @@ -168,7 +168,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_persistent_torrent`](crate::core::databases::Database::load_persistent_torrent). 
- fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT completed FROM torrents WHERE info_hash = ?")?; @@ -215,12 +215,12 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_global_number_of_downloads`](crate::core::databases::Database::load_global_number_of_downloads). - fn load_global_downloads(&self) -> Result, Error> { + fn load_global_downloads(&self) -> Result, Error> { self.load_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL) } /// Refer to [`databases::Database::save_global_number_of_downloads`](crate::core::databases::Database::save_global_number_of_downloads). - fn save_global_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { + fn save_global_downloads(&self, downloaded: NumberOfDownloads) -> Result<(), Error> { self.save_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL, downloaded) } diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index b637219ad..6147873f6 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -52,7 +52,7 @@ pub mod setup; use bittorrent_primitives::info_hash::InfoHash; use mockall::automock; -use torrust_tracker_primitives::{PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{NumberOfDownloads, PersistentTorrents}; use self::error::Error; use crate::authentication::{self, Key}; @@ -110,7 +110,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the metrics cannot be loaded. - fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error>; + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error>; /// Saves torrent metrics data into the database. 
/// @@ -149,7 +149,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the total downloads cannot be loaded. - fn load_global_downloads(&self) -> Result, Error>; + fn load_global_downloads(&self) -> Result, Error>; /// Saves the total number of downloads for all torrents into the database. /// @@ -163,7 +163,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the total downloads cannot be saved. - fn save_global_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error>; + fn save_global_downloads(&self, downloaded: NumberOfDownloads) -> Result<(), Error>; /// Increases the total number of downloads for all torrents. /// diff --git a/packages/tracker-core/src/statistics/persisted/downloads.rs b/packages/tracker-core/src/statistics/persisted/downloads.rs index 7edaf73d8..2e2ae3926 100644 --- a/packages/tracker-core/src/statistics/persisted/downloads.rs +++ b/packages/tracker-core/src/statistics/persisted/downloads.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::{PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{NumberOfDownloads, PersistentTorrents}; use crate::databases::error::Error; use crate::databases::Database; @@ -89,7 +89,7 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. - pub(crate) fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { + pub(crate) fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { self.database.load_torrent_downloads(info_hash) } @@ -133,7 +133,7 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. 
- pub(crate) fn load_global_downloads(&self) -> Result, Error> { + pub(crate) fn load_global_downloads(&self) -> Result, Error> { self.database.load_global_downloads() } } diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 5c8a335b6..e44bd774f 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -6,7 +6,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; /// In-memory repository for torrent entries. 
@@ -52,7 +52,7 @@ impl InMemoryTorrentRepository { &self, info_hash: &InfoHash, peer: &peer::Peer, - opt_persistent_torrent: Option, + opt_persistent_torrent: Option, ) { self.swarms .handle_announcement(info_hash, peer, opt_persistent_torrent) From bcf2338b04b5953dd75f2072365baa0dacff6b16 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 15:12:26 +0100 Subject: [PATCH 094/247] refactor: [#1541] rename type alias PersistentTorrents to NumberOfDownloadsBTreeMap --- packages/primitives/src/lib.rs | 2 +- .../src/repository/dash_map_mutex_std.rs | 4 ++-- .../src/repository/mod.rs | 6 +++--- .../src/repository/rw_lock_std.rs | 4 ++-- .../src/repository/rw_lock_std_mutex_std.rs | 4 ++-- .../src/repository/rw_lock_std_mutex_tokio.rs | 4 ++-- .../src/repository/rw_lock_tokio.rs | 4 ++-- .../src/repository/rw_lock_tokio_mutex_std.rs | 4 ++-- .../src/repository/rw_lock_tokio_mutex_tokio.rs | 4 ++-- .../src/repository/skip_map_mutex_std.rs | 8 ++++---- .../tests/common/repo.rs | 4 ++-- .../tests/repository/mod.rs | 12 ++++++------ packages/torrent-repository/src/swarms.rs | 12 ++++++------ packages/tracker-core/src/databases/driver/mysql.rs | 4 ++-- packages/tracker-core/src/databases/driver/sqlite.rs | 4 ++-- packages/tracker-core/src/databases/mod.rs | 4 ++-- .../src/statistics/persisted/downloads.rs | 8 ++++---- .../tracker-core/src/torrent/repository/in_memory.rs | 4 ++-- 18 files changed, 48 insertions(+), 48 deletions(-) diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index b04991eb8..ec2edda97 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -19,4 +19,4 @@ use bittorrent_primitives::info_hash::InfoHash; pub type DurationSinceUnixEpoch = Duration; pub type NumberOfDownloads = u32; -pub type PersistentTorrents = BTreeMap; +pub type NumberOfDownloadsBTreeMap = BTreeMap; diff --git a/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs 
b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs index c0ef455d4..192777b32 100644 --- a/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs @@ -5,7 +5,7 @@ use dashmap::DashMap; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -77,7 +77,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { for (info_hash, completed) in persistent_torrents { if self.torrents.contains_key(info_hash) { continue; diff --git a/packages/torrent-repository-benchmarking/src/repository/mod.rs b/packages/torrent-repository-benchmarking/src/repository/mod.rs index 2ad7a3927..890088ea7 100644 --- a/packages/torrent-repository-benchmarking/src/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/src/repository/mod.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; pub mod dash_map_mutex_std; pub mod rw_lock_std; @@ -19,7 +19,7 @@ pub trait Repository: Debug + Default + Sized + 'static { fn 
get(&self, key: &InfoHash) -> Option; fn get_metrics(&self) -> AggregateSwarmMetadata; fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>; - fn import_persistent(&self, persistent_torrents: &PersistentTorrents); + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap); fn remove(&self, key: &InfoHash) -> Option; fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); fn remove_peerless_torrents(&self, policy: &TrackerPolicy); @@ -32,7 +32,7 @@ pub trait RepositoryAsync: Debug + Default + Sized + 'static { fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; fn get_metrics(&self) -> impl std::future::Future + Send; fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> impl std::future::Future + Send; + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) -> impl std::future::Future + Send; fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs index c0e4d5cf5..074725674 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, 
PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -92,7 +92,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut torrents = self.get_torrents_mut(); for (info_hash, downloaded) in persistent_torrents { diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs index 30aabc799..9577a42e1 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -87,7 +87,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut torrents = self.get_torrents_mut(); for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs index f56322654..73cb64a08 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs +++ 
b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs @@ -8,7 +8,7 @@ use futures::{Future, FutureExt}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -101,7 +101,7 @@ where metrics } - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + async fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut db = self.get_torrents_mut(); for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs index 091ff303d..9d7d591fc 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -98,7 +98,7 @@ where metrics } - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + async fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { 
let mut torrents = self.get_torrents_mut().await; for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs index 542ad7f0a..6ad7ade98 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -92,7 +92,7 @@ where metrics } - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + async fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut torrents = self.get_torrents_mut().await; for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs index 2551972b3..6ce6c3f58 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; 
-use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -95,7 +95,7 @@ where metrics } - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + async fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut db = self.get_torrents_mut().await; for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs index 7d141facb..81fc1c05a 100644 --- a/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs @@ -5,7 +5,7 @@ use crossbeam_skiplist::SkipMap; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -100,7 +100,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { for (info_hash, completed) in persistent_torrents { if self.torrents.contains_key(info_hash) { continue; @@ -193,7 +193,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { for (info_hash, completed) in persistent_torrents { 
if self.torrents.contains_key(info_hash) { continue; @@ -286,7 +286,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { for (info_hash, completed) in persistent_torrents { if self.torrents.contains_key(info_hash) { continue; diff --git a/packages/torrent-repository-benchmarking/tests/common/repo.rs b/packages/torrent-repository-benchmarking/tests/common/repo.rs index 3371e3c64..e5037d641 100644 --- a/packages/torrent-repository-benchmarking/tests/common/repo.rs +++ b/packages/torrent-repository-benchmarking/tests/common/repo.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use torrust_tracker_torrent_repository_benchmarking::repository::{Repository as _, RepositoryAsync as _}; use torrust_tracker_torrent_repository_benchmarking::{ EntrySingle, TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, @@ -144,7 +144,7 @@ impl Repo { } } - pub(crate) async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + pub(crate) async fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { match self { Repo::RwLockStd(repo) => repo.import_persistent(persistent_torrents), Repo::RwLockStdMutexStd(repo) => repo.import_persistent(persistent_torrents), diff --git a/packages/torrent-repository-benchmarking/tests/repository/mod.rs b/packages/torrent-repository-benchmarking/tests/repository/mod.rs index 6973f38bd..141faa8a9 100644 --- 
a/packages/torrent-repository-benchmarking/tests/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/repository/mod.rs @@ -7,7 +7,7 @@ use rstest::{fixture, rstest}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::PersistentTorrents; +use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; use torrust_tracker_torrent_repository_benchmarking::entry::Entry as _; use torrust_tracker_torrent_repository_benchmarking::repository::dash_map_mutex_std::XacrimonDashMap; use torrust_tracker_torrent_repository_benchmarking::repository::rw_lock_std::RwLockStd; @@ -167,12 +167,12 @@ fn many_hashed_in_order() -> Entries { } #[fixture] -fn persistent_empty() -> PersistentTorrents { - PersistentTorrents::default() +fn persistent_empty() -> NumberOfDownloadsBTreeMap { + NumberOfDownloadsBTreeMap::default() } #[fixture] -fn persistent_single() -> PersistentTorrents { +fn persistent_single() -> NumberOfDownloadsBTreeMap { let hash = &mut DefaultHasher::default(); hash.write_u8(1); @@ -182,7 +182,7 @@ fn persistent_single() -> PersistentTorrents { } #[fixture] -fn persistent_three() -> PersistentTorrents { +fn persistent_three() -> NumberOfDownloadsBTreeMap { let hash = &mut DefaultHasher::default(); hash.write_u8(1); @@ -445,7 +445,7 @@ async fn it_should_import_persistent_torrents( )] repo: Repo, #[case] entries: Entries, - #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, + #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: NumberOfDownloadsBTreeMap, ) { make(&repo, &entries).await; diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 9c1f3d9b2..ba8a80a62 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs 
@@ -7,7 +7,7 @@ use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use crate::event::sender::Sender; use crate::event::Event; @@ -356,7 +356,7 @@ impl Swarms { /// This method takes a set of persisted torrent entries (e.g., from a /// database) and imports them into the in-memory repository for immediate /// access. - pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> u64 { + pub fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) -> u64 { tracing::info!("Importing persisted info about torrents ..."); let mut torrents_imported = 0; @@ -1271,7 +1271,7 @@ mod tests { use std::sync::Arc; - use torrust_tracker_primitives::PersistentTorrents; + use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; use crate::swarms::Swarms; use crate::tests::{leecher, sample_info_hash}; @@ -1282,7 +1282,7 @@ mod tests { let infohash = sample_info_hash(); - let mut persistent_torrents = PersistentTorrents::default(); + let mut persistent_torrents = NumberOfDownloadsBTreeMap::default(); persistent_torrents.insert(infohash, 1); @@ -1302,7 +1302,7 @@ mod tests { let infohash = sample_info_hash(); - let mut persistent_torrents = PersistentTorrents::default(); + let mut persistent_torrents = NumberOfDownloadsBTreeMap::default(); persistent_torrents.insert(infohash, 1); persistent_torrents.insert(infohash, 2); @@ -1327,7 +1327,7 @@ mod tests { // Try to import the torrent entry let new_number_of_downloads = initial_number_of_downloads + 1; - let mut persistent_torrents = PersistentTorrents::default(); + let mut 
persistent_torrents = NumberOfDownloadsBTreeMap::default(); persistent_torrents.insert(infohash, new_number_of_downloads); swarms.import_persistent(&persistent_torrents); diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index a5dfc50e5..da2f86ce8 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -13,7 +13,7 @@ use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; -use torrust_tracker_primitives::{NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::{Database, Driver, Error, TORRENTS_DOWNLOADS_TOTAL}; use crate::authentication::key::AUTH_KEY_LENGTH; @@ -146,7 +146,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). 
- fn load_all_torrents_downloads(&self) -> Result { + fn load_all_torrents_downloads(&self) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let torrents = conn.query_map( diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index d4b6a82c6..d08351aa8 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -13,7 +13,7 @@ use r2d2::Pool; use r2d2_sqlite::rusqlite::params; use r2d2_sqlite::rusqlite::types::Null; use r2d2_sqlite::SqliteConnectionManager; -use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::{Database, Driver, Error, TORRENTS_DOWNLOADS_TOTAL}; use crate::authentication::{self, Key}; @@ -152,7 +152,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). 
- fn load_all_torrents_downloads(&self) -> Result { + fn load_all_torrents_downloads(&self) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index 6147873f6..c9d89769a 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -52,7 +52,7 @@ pub mod setup; use bittorrent_primitives::info_hash::InfoHash; use mockall::automock; -use torrust_tracker_primitives::{NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{NumberOfDownloads, NumberOfDownloadsBTreeMap}; use self::error::Error; use crate::authentication::{self, Key}; @@ -101,7 +101,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the metrics cannot be loaded. - fn load_all_torrents_downloads(&self) -> Result; + fn load_all_torrents_downloads(&self) -> Result; /// Loads torrent metrics data from the database for one torrent. /// diff --git a/packages/tracker-core/src/statistics/persisted/downloads.rs b/packages/tracker-core/src/statistics/persisted/downloads.rs index 2e2ae3926..4d3bdf9a3 100644 --- a/packages/tracker-core/src/statistics/persisted/downloads.rs +++ b/packages/tracker-core/src/statistics/persisted/downloads.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::{NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{NumberOfDownloads, NumberOfDownloadsBTreeMap}; use crate::databases::error::Error; use crate::databases::Database; @@ -77,7 +77,7 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. 
- pub(crate) fn load_all_torrents_downloads(&self) -> Result { + pub(crate) fn load_all_torrents_downloads(&self) -> Result { self.database.load_all_torrents_downloads() } @@ -141,7 +141,7 @@ impl DatabaseDownloadsMetricRepository { #[cfg(test)] mod tests { - use torrust_tracker_primitives::PersistentTorrents; + use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; use super::DatabaseDownloadsMetricRepository; use crate::databases::setup::initialize_database; @@ -191,7 +191,7 @@ mod tests { let torrents = repository.load_all_torrents_downloads().unwrap(); - let mut expected_torrents = PersistentTorrents::new(); + let mut expected_torrents = NumberOfDownloadsBTreeMap::new(); expected_torrents.insert(infohash_one, 1); expected_torrents.insert(infohash_two, 2); diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index e44bd774f..164f46c69 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -6,7 +6,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; /// In-memory repository for torrent entries. @@ -264,7 +264,7 @@ impl InMemoryTorrentRepository { /// # Arguments /// /// * `persistent_torrents` - A reference to the persisted torrent data. 
- pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + pub fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { self.swarms.import_persistent(persistent_torrents); } } From bd6e06acaaebffad76a69249a5faa3402db501a7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 15:14:09 +0100 Subject: [PATCH 095/247] refactor: [#1541] remove unused code --- src/app.rs | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/src/app.rs b/src/app.rs index c31281829..ccc2e8bcb 100644 --- a/src/app.rs +++ b/src/app.rs @@ -66,13 +66,6 @@ async fn load_data_from_database(config: &Configuration, app_container: &Arc) -> JobManager { @@ -127,18 +120,6 @@ async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc) { - if config.core.tracker_policy.persistent_torrent_completed_stat { - app_container - .tracker_core_container - .torrents_manager - .load_torrents_from_database() - .expect("Could not load torrents from database."); - } -} - -#[allow(dead_code)] async fn load_torrent_metrics(config: &Configuration, app_container: &Arc) { if config.core.tracker_policy.persistent_torrent_completed_stat { bittorrent_tracker_core::statistics::persisted::load_persisted_metrics( From 3d6fc651d2cb515a3147264554f0db6f4c7ace12 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 08:14:01 +0100 Subject: [PATCH 096/247] refactor: [#1543] rename AggregateSwarmMetadata to AggregateActiveSwarmMetadata Aggregate values are only for active swarms. For example, it does not count downloads for torrents that are not currently active. 
--- .../src/v1/context/stats/resources.rs | 4 ++-- .../src/statistics/services.rs | 8 ++++---- packages/primitives/src/swarm_metadata.rs | 13 ++++++------- .../src/statistics/services.rs | 8 ++++---- .../src/repository/dash_map_mutex_std.rs | 6 +++--- .../src/repository/mod.rs | 6 +++--- .../src/repository/rw_lock_std.rs | 6 +++--- .../src/repository/rw_lock_std_mutex_std.rs | 6 +++--- .../src/repository/rw_lock_std_mutex_tokio.rs | 6 +++--- .../src/repository/rw_lock_tokio.rs | 6 +++--- .../src/repository/rw_lock_tokio_mutex_std.rs | 6 +++--- .../repository/rw_lock_tokio_mutex_tokio.rs | 6 +++--- .../src/repository/skip_map_mutex_std.rs | 14 +++++++------- .../tests/common/repo.rs | 4 ++-- .../tests/repository/mod.rs | 4 ++-- packages/torrent-repository/src/swarms.rs | 18 +++++++++--------- .../src/torrent/repository/in_memory.rs | 4 ++-- .../src/statistics/services.rs | 8 ++++---- .../src/statistics/services.rs | 8 ++++---- 19 files changed, 70 insertions(+), 71 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs index 8fcfd1be0..8b6d639c8 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs @@ -136,7 +136,7 @@ impl From for LabeledStats { mod tests { use torrust_rest_tracker_api_core::statistics::metrics::Metrics; use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use super::Stats; @@ -145,7 +145,7 @@ mod tests { fn stats_resource_should_be_converted_from_tracker_metrics() { assert_eq!( Stats::from(TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata { + torrents_metrics: AggregateActiveSwarmMetadata { total_complete: 1, total_downloaded: 2, 
total_incomplete: 3, diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index af1e30524..dbc096030 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -23,7 +23,7 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; +use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::metrics::Metrics; use crate::statistics::repository::Repository; @@ -34,7 +34,7 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateSwarmMetadata, + pub torrents_metrics: AggregateActiveSwarmMetadata, /// Application level metrics. Usage statistics/metrics. /// @@ -72,7 +72,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use torrust_tracker_test_helpers::configuration; use crate::event::bus::EventBus; @@ -109,7 +109,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata::default(), + torrents_metrics: AggregateActiveSwarmMetadata::default(), protocol_metrics: describe_metrics(), } ); diff --git a/packages/primitives/src/swarm_metadata.rs b/packages/primitives/src/swarm_metadata.rs index a70298d71..57ba816d3 100644 --- a/packages/primitives/src/swarm_metadata.rs +++ b/packages/primitives/src/swarm_metadata.rs @@ -46,24 +46,23 @@ impl SwarmMetadata { /// Structure that holds aggregate swarm metadata. 
/// -/// Metrics are aggregate values for all torrents. +/// Metrics are aggregate values for all active torrents/swarms. #[derive(Copy, Clone, Debug, PartialEq, Default)] -pub struct AggregateSwarmMetadata { - /// Total number of peers that have ever completed downloading for all - /// torrents. +pub struct AggregateActiveSwarmMetadata { + /// Total number of peers that have ever completed downloading. pub total_downloaded: u64, - /// Total number of seeders for all torrents. + /// Total number of seeders. pub total_complete: u64, - /// Total number of leechers for all torrents. + /// Total number of leechers. pub total_incomplete: u64, /// Total number of torrents. pub total_torrents: u64, } -impl AddAssign for AggregateSwarmMetadata { +impl AddAssign for AggregateActiveSwarmMetadata { fn add_assign(&mut self, rhs: Self) { self.total_complete += rhs.total_complete; self.total_downloaded += rhs.total_downloaded; diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 8fb29e7bd..4a471a3ef 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -5,7 +5,7 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; use torrust_tracker_metrics::metric_collection::MetricCollection; -use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; +use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use torrust_udp_tracker_server::statistics as udp_server_statistics; use crate::statistics::metrics::Metrics; @@ -16,7 +16,7 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateSwarmMetadata, + pub torrents_metrics: AggregateActiveSwarmMetadata, /// Application level metrics. 
Usage statistics/metrics. /// @@ -144,7 +144,7 @@ mod tests { use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use torrust_tracker_test_helpers::configuration; use crate::statistics::metrics::Metrics; @@ -187,7 +187,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata::default(), + torrents_metrics: AggregateActiveSwarmMetadata::default(), protocol_metrics: Metrics::default(), } ); diff --git a/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs index 192777b32..fec94b4a5 100644 --- a/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use dashmap::DashMap; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; @@ -46,8 +46,8 @@ where maybe_entry.map(|entry| entry.clone()) } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); diff --git 
a/packages/torrent-repository-benchmarking/src/repository/mod.rs b/packages/torrent-repository-benchmarking/src/repository/mod.rs index 890088ea7..cf58838a1 100644 --- a/packages/torrent-repository-benchmarking/src/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/src/repository/mod.rs @@ -1,7 +1,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; pub mod dash_map_mutex_std; @@ -17,7 +17,7 @@ use std::fmt::Debug; pub trait Repository: Debug + Default + Sized + 'static { fn get(&self, key: &InfoHash) -> Option; - fn get_metrics(&self) -> AggregateSwarmMetadata; + fn get_metrics(&self) -> AggregateActiveSwarmMetadata; fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>; fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap); fn remove(&self, key: &InfoHash) -> Option; @@ -30,7 +30,7 @@ pub trait Repository: Debug + Default + Sized + 'static { #[allow(clippy::module_name_repetitions)] pub trait RepositoryAsync: Debug + Default + Sized + 'static { fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; - fn get_metrics(&self) -> impl std::future::Future + Send; + fn get_metrics(&self) -> impl std::future::Future + Send; fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) -> impl std::future::Future + Send; fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs 
b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs index 074725674..5000579dd 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs @@ -1,7 +1,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; @@ -64,8 +64,8 @@ where db.get(key).cloned() } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().values() { let stats = entry.get_swarm_metadata(); diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs index 9577a42e1..085256ff1 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use 
super::Repository; @@ -59,8 +59,8 @@ where db.get(key).cloned() } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().values() { let stats = entry.lock().expect("it should get a lock").get_swarm_metadata(); diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs index 73cb64a08..9fd451149 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs @@ -7,7 +7,7 @@ use futures::future::join_all; use futures::{Future, FutureExt}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; @@ -85,8 +85,8 @@ where } } - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + async fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); let entries: Vec<_> = self.get_torrents().values().cloned().collect(); diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs index 9d7d591fc..e85200aeb 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs +++ 
b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs @@ -1,7 +1,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; @@ -84,8 +84,8 @@ where } } - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + async fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().await.values() { let stats = entry.get_swarm_metadata(); diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs index 6ad7ade98..8d6584713 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; @@ -78,8 +78,8 @@ where } } - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + async fn 
get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().await.values() { let stats = entry.get_swarm_metadata(); diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs index 6ce6c3f58..c8f499e03 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; @@ -81,8 +81,8 @@ where } } - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + async fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().await.values() { let stats = entry.get_swarm_metadata().await; diff --git a/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs index 81fc1c05a..0432b13d0 100644 --- a/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use crossbeam_skiplist::SkipMap; use 
torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; @@ -69,8 +69,8 @@ where maybe_entry.map(|entry| entry.value().clone()) } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); @@ -162,8 +162,8 @@ where maybe_entry.map(|entry| entry.value().clone()) } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().read().get_swarm_metadata(); @@ -255,8 +255,8 @@ where maybe_entry.map(|entry| entry.value().clone()) } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().lock().get_swarm_metadata(); diff --git a/packages/torrent-repository-benchmarking/tests/common/repo.rs b/packages/torrent-repository-benchmarking/tests/common/repo.rs index e5037d641..2987240ef 100644 --- a/packages/torrent-repository-benchmarking/tests/common/repo.rs +++ b/packages/torrent-repository-benchmarking/tests/common/repo.rs @@ -1,7 +1,7 @@ use bittorrent_primitives::info_hash::InfoHash; use 
torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use torrust_tracker_torrent_repository_benchmarking::repository::{Repository as _, RepositoryAsync as _}; use torrust_tracker_torrent_repository_benchmarking::{ @@ -75,7 +75,7 @@ impl Repo { } } - pub(crate) async fn get_metrics(&self) -> AggregateSwarmMetadata { + pub(crate) async fn get_metrics(&self) -> AggregateActiveSwarmMetadata { match self { Repo::RwLockStd(repo) => repo.get_metrics(), Repo::RwLockStdMutexStd(repo) => repo.get_metrics(), diff --git a/packages/torrent-repository-benchmarking/tests/repository/mod.rs b/packages/torrent-repository-benchmarking/tests/repository/mod.rs index 141faa8a9..e555654ca 100644 --- a/packages/torrent-repository-benchmarking/tests/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/repository/mod.rs @@ -402,11 +402,11 @@ async fn it_should_get_metrics( repo: Repo, #[case] entries: Entries, ) { - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; make(&repo, &entries).await; - let mut metrics = AggregateSwarmMetadata::default(); + let mut metrics = AggregateActiveSwarmMetadata::default(); for (_, torrent) in entries { let stats = torrent.get_swarm_metadata(); diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index ba8a80a62..f0b3233b6 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -6,7 +6,7 @@ use tokio::sync::Mutex; use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use 
torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use crate::event::sender::Sender; @@ -394,8 +394,8 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. - pub async fn get_aggregate_swarm_metadata(&self) -> Result { - let mut metrics = AggregateSwarmMetadata::default(); + pub async fn get_aggregate_swarm_metadata(&self) -> Result { + let mut metrics = AggregateActiveSwarmMetadata::default(); for swarm_handle in &self.swarms { let swarm = swarm_handle.value().lock().await; @@ -1055,7 +1055,7 @@ mod tests { use std::sync::Arc; use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::swarms::Swarms; use crate::tests::{complete_peer, leecher, sample_info_hash, seeder}; @@ -1070,7 +1070,7 @@ mod tests { assert_eq!( aggregate_swarm_metadata, - AggregateSwarmMetadata { + AggregateActiveSwarmMetadata { total_complete: 0, total_downloaded: 0, total_incomplete: 0, @@ -1092,7 +1092,7 @@ mod tests { assert_eq!( aggregate_swarm_metadata, - AggregateSwarmMetadata { + AggregateActiveSwarmMetadata { total_complete: 0, total_downloaded: 0, total_incomplete: 1, @@ -1114,7 +1114,7 @@ mod tests { assert_eq!( aggregate_swarm_metadata, - AggregateSwarmMetadata { + AggregateActiveSwarmMetadata { total_complete: 1, total_downloaded: 0, total_incomplete: 0, @@ -1136,7 +1136,7 @@ mod tests { assert_eq!( aggregate_swarm_metadata, - AggregateSwarmMetadata { + AggregateActiveSwarmMetadata { total_complete: 1, 
total_downloaded: 0, total_incomplete: 0, @@ -1164,7 +1164,7 @@ mod tests { assert_eq!( (aggregate_swarm_metadata), - (AggregateSwarmMetadata { + (AggregateActiveSwarmMetadata { total_complete: 0, total_downloaded: 0, total_incomplete: 1_000_000, diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 164f46c69..ffd885c4f 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; @@ -226,7 +226,7 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. 
#[must_use] - pub async fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { + pub async fn get_aggregate_swarm_metadata(&self) -> AggregateActiveSwarmMetadata { self.swarms .get_aggregate_swarm_metadata() .await diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index 20ba2ea7f..24d25a25c 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -39,7 +39,7 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; +use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::metrics::Metrics; use crate::statistics::repository::Repository; @@ -50,7 +50,7 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateSwarmMetadata, + pub torrents_metrics: AggregateActiveSwarmMetadata, /// Application level metrics. Usage statistics/metrics. 
/// @@ -89,7 +89,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::describe_metrics; use crate::statistics::repository::Repository; @@ -106,7 +106,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata::default(), + torrents_metrics: AggregateActiveSwarmMetadata::default(), protocol_metrics: describe_metrics(), } ); diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index c8b24a744..e6e5a28f3 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -41,7 +41,7 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::services::banning::BanService; use tokio::sync::RwLock; -use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; +use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::metrics::Metrics; use crate::statistics::repository::Repository; @@ -52,7 +52,7 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateSwarmMetadata, + pub torrents_metrics: AggregateActiveSwarmMetadata, /// Application level metrics. Usage statistics/metrics. 
/// @@ -109,7 +109,7 @@ mod tests { use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::describe_metrics; use crate::statistics::repository::Repository; @@ -132,7 +132,7 @@ assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata::default(), + torrents_metrics: AggregateActiveSwarmMetadata::default(), protocol_metrics: describe_metrics(), } ); From e1076142feea8062691da139f9b7ff38be59491f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 08:19:12 +0100 Subject: [PATCH 097/247] chore: [#1543] remove comment on tracker-core handle_announcement We need to load the number of downloads for the torrent before adding it to the active swarms because the scrape response includes the number of downloads, and that number should include all downloads ever. --- packages/tracker-core/src/announce_handler.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index a6614361a..f74c135e3 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -163,11 +163,6 @@ impl AnnounceHandler { ) -> Result { self.whitelist_authorization.authorize(info_hash).await?; - // This will be removed in the future. - // See https://github.com/torrust/torrust-tracker/issues/1502 - // There will be a persisted metric for counting the total number of - // downloads across all torrents. The in-memory metric will count only - // the number of downloads during the current tracker uptime.
let opt_persistent_torrent = if self.config.tracker_policy.persistent_torrent_completed_stat { self.db_downloads_metric_repository.load_torrent_downloads(info_hash)? } else { From 762bf6905477866ae2cf2a676255050d7a522d7f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 08:39:17 +0100 Subject: [PATCH 098/247] refactor: [#1543] Optimization: Don't load number of downloads from DB if not needed --- packages/torrent-repository/src/swarms.rs | 4 ++++ packages/tracker-core/src/announce_handler.rs | 24 ++++++++++++------- .../src/torrent/repository/in_memory.rs | 6 +++++ 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index f0b3233b6..8e7bc24de 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -467,6 +467,10 @@ impl Swarms { pub fn is_empty(&self) -> bool { self.swarms.is_empty() } + + pub fn contains(&self, key: &InfoHash) -> bool { + self.swarms.contains_key(key) + } } #[derive(thiserror::Error, Debug, Clone)] diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index f74c135e3..0b6bffd31 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -96,9 +96,10 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::{Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::core::AnnounceData; -use torrust_tracker_primitives::peer; +use torrust_tracker_primitives::{peer, NumberOfDownloads}; use super::torrent::repository::in_memory::InMemoryTorrentRepository; +use crate::databases; use crate::error::AnnounceError; use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::whitelist::authorization::WhitelistAuthorization; @@ -163,21 +164,28 @@ impl AnnounceHandler { ) -> Result { 
self.whitelist_authorization.authorize(info_hash).await?; - let opt_persistent_torrent = if self.config.tracker_policy.persistent_torrent_completed_stat { - self.db_downloads_metric_repository.load_torrent_downloads(info_hash)? - } else { - None - }; - peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); self.in_memory_torrent_repository - .handle_announcement(info_hash, peer, opt_persistent_torrent) + .handle_announcement(info_hash, peer, self.load_downloads_metric_if_needed(info_hash)?) .await; Ok(self.build_announce_data(info_hash, peer, peers_wanted).await) } + /// Loads the number of downloads for a torrent if needed. + fn load_downloads_metric_if_needed( + &self, + info_hash: &InfoHash, + ) -> Result, databases::error::Error> { + if self.config.tracker_policy.persistent_torrent_completed_stat && !self.in_memory_torrent_repository.contains(info_hash) + { + Ok(self.db_downloads_metric_repository.load_torrent_downloads(info_hash)?) + } else { + Ok(None) + } + } + /// Builds the announce data for the peer making the request. async fn build_announce_data(&self, info_hash: &InfoHash, peer: &peer::Peer, peers_wanted: &PeersWanted) -> AnnounceData { let peers = self diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index ffd885c4f..cc873726d 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -267,4 +267,10 @@ impl InMemoryTorrentRepository { pub fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { self.swarms.import_persistent(persistent_torrents); } + + /// Checks if the repository contains a torrent entry for the given infohash. 
+ #[must_use] + pub fn contains(&self, info_hash: &InfoHash) -> bool { + self.swarms.contains(info_hash) + } } From 02c33f6972eef36058afee9f0ee7180b51b5d072 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 11:34:42 +0100 Subject: [PATCH 099/247] fix: [#1543] the downloads counter values returned in the API It now returns the persisted value when available (stats persistence enabled). --- Cargo.lock | 1 + .../src/v1/context/stats/handlers.rs | 15 ++++- .../src/v1/context/stats/routes.rs | 3 + packages/rest-tracker-api-core/Cargo.toml | 1 + .../src/statistics/services.rs | 56 +++++++++++++++++-- .../torrent-repository/src/statistics/mod.rs | 2 +- packages/tracker-core/src/statistics/mod.rs | 2 +- 7 files changed, 73 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 96de11cb2..009b1e458 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4646,6 +4646,7 @@ dependencies = [ "bittorrent-udp-tracker-core", "tokio", "torrust-tracker-configuration", + "torrust-tracker-events", "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 3a353f1fc..463c81ac8 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -10,6 +10,7 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use serde::Deserialize; use tokio::sync::RwLock; use torrust_rest_tracker_api_core::statistics::services::{get_labeled_metrics, get_metrics}; +use torrust_tracker_configuration::Core; use super::responses::{labeled_metrics_response, labeled_stats_response, metrics_response, stats_response}; @@ -40,14 +41,26 @@ pub struct QueryParams { #[allow(clippy::type_complexity)] pub async fn get_stats_handler( State(state): State<( + Arc, Arc, Arc>, + Arc, + Arc, Arc, Arc, 
)>, params: Query, ) -> Response { - let metrics = get_metrics(state.0.clone(), state.1.clone(), state.2.clone(), state.3.clone()).await; + let metrics = get_metrics( + state.0.clone(), + state.1.clone(), + state.2.clone(), + state.3.clone(), + state.4.clone(), + state.5.clone(), + state.6.clone(), + ) + .await; match params.0.format { Some(format) => match format { diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index f6c661130..3be266d3a 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -17,8 +17,11 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc, in_memory_torrent_repository: Arc, ban_service: Arc>, + torrent_repository_stats_repository: Arc, + tracker_core_stats_repository: Arc, http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; + let aggregate_active_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let http_stats = http_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; + let total_downloaded = if core_config.tracker_policy.persistent_torrent_completed_stat { + let metrics = tracker_core_stats_repository.get_metrics().await; + + let downloads = metrics.metric_collection.get_counter_value( + &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + &LabelSet::default(), + ); + + if let Some(downloads) = downloads { + downloads.value() + } else { + 0 + } + } else { + let metrics = torrent_repository_stats_repository.get_metrics().await; + + let downloads = metrics.metric_collection.get_counter_value( + 
&metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), + &LabelSet::default(), + ); + + if let Some(downloads) = downloads { + downloads.value() + } else { + 0 + } + }; + + let mut torrents_metrics = aggregate_active_swarm_metadata; + torrents_metrics.total_downloaded = total_downloaded; + // For backward compatibility we keep the `tcp4_connections_handled` and // `tcp6_connections_handled` metrics. They don't make sense for the HTTP // tracker, but we keep them for now. In new major versions we should remove @@ -138,14 +177,16 @@ mod tests { use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; use bittorrent_http_tracker_core::statistics::repository::Repository; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_tracker_core::{self}; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use torrust_tracker_test_helpers::configuration; + use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::statistics::metrics::Metrics; use crate::statistics::services::{get_metrics, TrackerMetrics}; @@ -157,8 +198,12 @@ mod tests { #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { let config = tracker_configuration(); + let core_config = Arc::new(config.core.clone()); + + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize(SenderStatus::Enabled)); + + let tracker_core_container = TrackerCoreContainer::initialize_from(&core_config, &torrent_repository_container.clone()); - let 
in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); // HTTP core stats @@ -177,8 +222,11 @@ mod tests { let udp_server_stats_repository = Arc::new(torrust_udp_tracker_server::statistics::repository::Repository::new()); let tracker_metrics = get_metrics( - in_memory_torrent_repository.clone(), + core_config, + tracker_core_container.in_memory_torrent_repository.clone(), ban_service.clone(), + torrent_repository_container.stats_repository.clone(), + tracker_core_container.stats_repository.clone(), http_stats_repository.clone(), udp_server_stats_repository.clone(), ) diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index cfc252e34..ab5eb3f09 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -14,7 +14,7 @@ const TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL: &str = "torrent_repository_torren const TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL: &str = "torrent_repository_torrents_removed_total"; const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; -const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; +pub const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; const TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL: &str = "torrent_repository_torrents_inactive_total"; // Peers metrics diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs index ff8187379..0c421863f 100644 --- a/packages/tracker-core/src/statistics/mod.rs +++ b/packages/tracker-core/src/statistics/mod.rs @@ -10,7 +10,7 @@ use torrust_tracker_metrics::unit::Unit; // Torrent metrics -const TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = 
"tracker_core_persistent_torrents_downloads_total"; +pub const TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = "tracker_core_persistent_torrents_downloads_total"; #[must_use] pub fn describe_metrics() -> Metrics { From 8d3a6fe9c3ef05a914ac51437260191a7b3c4e47 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 11:48:02 +0100 Subject: [PATCH 100/247] refactor: [#1543] extract methods --- .../src/v1/context/stats/resources.rs | 5 +- .../src/statistics/metrics.rs | 31 ++++- .../src/statistics/services.rs | 109 +++++++++++------- 3 files changed, 98 insertions(+), 47 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs index 8b6d639c8..08f83026f 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs @@ -134,9 +134,8 @@ impl From for LabeledStats { #[cfg(test)] mod tests { - use torrust_rest_tracker_api_core::statistics::metrics::Metrics; + use torrust_rest_tracker_api_core::statistics::metrics::{Metrics, TorrentsMetrics}; use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; - use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use super::Stats; @@ -145,7 +144,7 @@ mod tests { fn stats_resource_should_be_converted_from_tracker_metrics() { assert_eq!( Stats::from(TrackerMetrics { - torrents_metrics: AggregateActiveSwarmMetadata { + torrents_metrics: TorrentsMetrics { total_complete: 1, total_downloaded: 2, total_incomplete: 3, diff --git a/packages/rest-tracker-api-core/src/statistics/metrics.rs b/packages/rest-tracker-api-core/src/statistics/metrics.rs index 7e41cf713..ca556becf 100644 --- a/packages/rest-tracker-api-core/src/statistics/metrics.rs +++ b/packages/rest-tracker-api-core/src/statistics/metrics.rs @@ -1,4 +1,33 @@ -/// Metrics collected by the tracker. 
+use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; + +/// Metrics collected by the tracker at the swarm layer. +#[derive(Copy, Clone, Debug, PartialEq, Default)] +pub struct TorrentsMetrics { + /// Total number of peers that have ever completed downloading. + pub total_downloaded: u64, + + /// Total number of seeders. + pub total_complete: u64, + + /// Total number of leechers. + pub total_incomplete: u64, + + /// Total number of torrents. + pub total_torrents: u64, +} + +impl From for TorrentsMetrics { + fn from(value: AggregateActiveSwarmMetadata) -> Self { + Self { + total_downloaded: value.total_downloaded, + total_complete: value.total_complete, + total_incomplete: value.total_incomplete, + total_torrents: value.total_torrents, + } + } +} + +/// Metrics collected by the tracker at the delivery layer. /// /// - Number of connections handled /// - Number of `announce` requests handled diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index cc02f61e6..a899cb961 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -9,10 +9,10 @@ use torrust_tracker_configuration::Core; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric_collection::MetricCollection; use torrust_tracker_metrics::metric_name; -use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use torrust_tracker_torrent_repository::statistics::TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL; use torrust_udp_tracker_server::statistics as udp_server_statistics; +use super::metrics::TorrentsMetrics; use crate::statistics::metrics::Metrics; /// All the metrics collected by the tracker. @@ -21,7 +21,7 @@ pub struct TrackerMetrics { /// Domain level metrics. 
/// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateActiveSwarmMetadata, + pub torrents_metrics: TorrentsMetrics, /// Application level metrics. Usage statistics/metrics. /// @@ -30,7 +30,6 @@ pub struct TrackerMetrics { } /// It returns all the [`TrackerMetrics`] -#[allow(deprecated)] pub async fn get_metrics( core_config: Arc, in_memory_torrent_repository: Arc, @@ -40,10 +39,25 @@ pub async fn get_metrics( http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { + TrackerMetrics { + torrents_metrics: get_torrents_metrics( + core_config, + in_memory_torrent_repository, + torrent_repository_stats_repository, + tracker_core_stats_repository, + ) + .await, + protocol_metrics: get_protocol_metrics(ban_service, http_stats_repository, udp_server_stats_repository).await, + } +} + +async fn get_torrents_metrics( + core_config: Arc, + in_memory_torrent_repository: Arc, + torrent_repository_stats_repository: Arc, + tracker_core_stats_repository: Arc, +) -> TorrentsMetrics { let aggregate_active_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; - let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); - let http_stats = http_stats_repository.get_stats().await; - let udp_server_stats = udp_server_stats_repository.get_stats().await; let total_downloaded = if core_config.tracker_policy.persistent_torrent_completed_stat { let metrics = tracker_core_stats_repository.get_metrics().await; @@ -73,47 +87,57 @@ pub async fn get_metrics( } }; - let mut torrents_metrics = aggregate_active_swarm_metadata; + let mut torrents_metrics: TorrentsMetrics = aggregate_active_swarm_metadata.into(); torrents_metrics.total_downloaded = total_downloaded; + torrents_metrics +} + +#[allow(deprecated)] +async fn get_protocol_metrics( + ban_service: Arc>, + http_stats_repository: Arc, + udp_server_stats_repository: Arc, +) -> Metrics { + let udp_banned_ips_total 
= ban_service.read().await.get_banned_ips_total(); + let http_stats = http_stats_repository.get_stats().await; + let udp_server_stats = udp_server_stats_repository.get_stats().await; + // For backward compatibility we keep the `tcp4_connections_handled` and // `tcp6_connections_handled` metrics. They don't make sense for the HTTP // tracker, but we keep them for now. In new major versions we should remove // them. - TrackerMetrics { - torrents_metrics, - protocol_metrics: Metrics { - // TCPv4 - tcp4_connections_handled: http_stats.tcp4_announces_handled + http_stats.tcp4_scrapes_handled, - tcp4_announces_handled: http_stats.tcp4_announces_handled, - tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled, - // TCPv6 - tcp6_connections_handled: http_stats.tcp6_announces_handled + http_stats.tcp6_scrapes_handled, - tcp6_announces_handled: http_stats.tcp6_announces_handled, - tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled, - // UDP - udp_requests_aborted: udp_server_stats.udp_requests_aborted, - udp_requests_banned: udp_server_stats.udp_requests_banned, - udp_banned_ips_total: udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns, - udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns, - udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns, - // UDPv4 - udp4_requests: udp_server_stats.udp4_requests, - udp4_connections_handled: udp_server_stats.udp4_connections_handled, - udp4_announces_handled: udp_server_stats.udp4_announces_handled, - udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled, - udp4_responses: udp_server_stats.udp4_responses, - udp4_errors_handled: udp_server_stats.udp4_errors_handled, - // UDPv6 - udp6_requests: udp_server_stats.udp6_requests, - udp6_connections_handled: udp_server_stats.udp6_connections_handled, - udp6_announces_handled: udp_server_stats.udp6_announces_handled, - udp6_scrapes_handled: 
udp_server_stats.udp6_scrapes_handled, - udp6_responses: udp_server_stats.udp6_responses, - udp6_errors_handled: udp_server_stats.udp6_errors_handled, - }, + Metrics { + // TCPv4 + tcp4_connections_handled: http_stats.tcp4_announces_handled + http_stats.tcp4_scrapes_handled, + tcp4_announces_handled: http_stats.tcp4_announces_handled, + tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled, + // TCPv6 + tcp6_connections_handled: http_stats.tcp6_announces_handled + http_stats.tcp6_scrapes_handled, + tcp6_announces_handled: http_stats.tcp6_announces_handled, + tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled, + // UDP + udp_requests_aborted: udp_server_stats.udp_requests_aborted, + udp_requests_banned: udp_server_stats.udp_requests_banned, + udp_banned_ips_total: udp_banned_ips_total as u64, + udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns, + udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns, + udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns, + // UDPv4 + udp4_requests: udp_server_stats.udp4_requests, + udp4_connections_handled: udp_server_stats.udp4_connections_handled, + udp4_announces_handled: udp_server_stats.udp4_announces_handled, + udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled, + udp4_responses: udp_server_stats.udp4_responses, + udp4_errors_handled: udp_server_stats.udp4_errors_handled, + // UDPv6 + udp6_requests: udp_server_stats.udp6_requests, + udp6_connections_handled: udp_server_stats.udp6_connections_handled, + udp6_announces_handled: udp_server_stats.udp6_announces_handled, + udp6_scrapes_handled: udp_server_stats.udp6_scrapes_handled, + udp6_responses: udp_server_stats.udp6_responses, + udp6_errors_handled: udp_server_stats.udp6_errors_handled, } } @@ -184,11 +208,10 @@ mod tests { use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; use torrust_tracker_events::bus::SenderStatus; - use 
torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use torrust_tracker_test_helpers::configuration; use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; - use crate::statistics::metrics::Metrics; + use crate::statistics::metrics::{Metrics, TorrentsMetrics}; use crate::statistics::services::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Configuration { @@ -235,7 +258,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: AggregateActiveSwarmMetadata::default(), + torrents_metrics: TorrentsMetrics::default(), protocol_metrics: Metrics::default(), } ); From b0e744390603b94a00232f1e8d72e61010c2a24a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 12:08:05 +0100 Subject: [PATCH 101/247] fix: [#1543] return always in API the downloads number from tracker-core The tracker-core always has the metric although it can be persisted or not. When it's not persisted, it contains the number of downloads during the session. On the other hand, the `torrent-repository` metric uses labels, so you have to sum all values for all labels to get the total.
``` torrent_repository_torrents_downloads_total{peer_role="seeder"} 1 tracker_core_persistent_torrents_downloads_total{} 1 ``` --- .../src/v1/context/stats/handlers.rs | 5 -- .../src/v1/context/stats/routes.rs | 2 - .../src/statistics/services.rs | 51 ++----------------- .../torrent-repository/src/statistics/mod.rs | 2 +- .../src/http/client/requests/announce.rs | 2 +- packages/tracker-core/src/statistics/mod.rs | 2 +- .../tracker-core/src/statistics/repository.rs | 21 +++++++- 7 files changed, 26 insertions(+), 59 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 463c81ac8..47bb5ad16 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -10,7 +10,6 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use serde::Deserialize; use tokio::sync::RwLock; use torrust_rest_tracker_api_core::statistics::services::{get_labeled_metrics, get_metrics}; -use torrust_tracker_configuration::Core; use super::responses::{labeled_metrics_response, labeled_stats_response, metrics_response, stats_response}; @@ -41,10 +40,8 @@ pub struct QueryParams { #[allow(clippy::type_complexity)] pub async fn get_stats_handler( State(state): State<( - Arc, Arc, Arc>, - Arc, Arc, Arc, Arc, @@ -57,8 +54,6 @@ pub async fn get_stats_handler( state.2.clone(), state.3.clone(), state.4.clone(), - state.5.clone(), - state.6.clone(), ) .await; diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index 3be266d3a..a573b764a 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -17,10 +17,8 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc, 
in_memory_torrent_repository: Arc, ban_service: Arc>, - torrent_repository_stats_repository: Arc, tracker_core_stats_repository: Arc, http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { TrackerMetrics { - torrents_metrics: get_torrents_metrics( - core_config, - in_memory_torrent_repository, - torrent_repository_stats_repository, - tracker_core_stats_repository, - ) - .await, + torrents_metrics: get_torrents_metrics(in_memory_torrent_repository, tracker_core_stats_repository).await, protocol_metrics: get_protocol_metrics(ban_service, http_stats_repository, udp_server_stats_repository).await, } } async fn get_torrents_metrics( - core_config: Arc, in_memory_torrent_repository: Arc, - torrent_repository_stats_repository: Arc, + tracker_core_stats_repository: Arc, ) -> TorrentsMetrics { let aggregate_active_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; - let total_downloaded = if core_config.tracker_policy.persistent_torrent_completed_stat { - let metrics = tracker_core_stats_repository.get_metrics().await; - - let downloads = metrics.metric_collection.get_counter_value( - &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), - &LabelSet::default(), - ); - - if let Some(downloads) = downloads { - downloads.value() - } else { - 0 - } - } else { - let metrics = torrent_repository_stats_repository.get_metrics().await; - - let downloads = metrics.metric_collection.get_counter_value( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), - &LabelSet::default(), - ); - - if let Some(downloads) = downloads { - downloads.value() - } else { - 0 - } - }; - let mut torrents_metrics: TorrentsMetrics = aggregate_active_swarm_metadata.into(); - torrents_metrics.total_downloaded = total_downloaded; + torrents_metrics.total_downloaded = tracker_core_stats_repository.get_torrents_downloads_total().await; torrents_metrics } @@ -152,7 +110,6 @@ pub struct TrackerLabeledMetrics { /// /// Will panic if 
the metrics cannot be merged. This could happen if the /// packages are producing duplicate metric names, for example. -#[allow(deprecated)] pub async fn get_labeled_metrics( in_memory_torrent_repository: Arc, ban_service: Arc>, @@ -245,10 +202,8 @@ mod tests { let udp_server_stats_repository = Arc::new(torrust_udp_tracker_server::statistics::repository::Repository::new()); let tracker_metrics = get_metrics( - core_config, tracker_core_container.in_memory_torrent_repository.clone(), ban_service.clone(), - torrent_repository_container.stats_repository.clone(), tracker_core_container.stats_repository.clone(), http_stats_repository.clone(), udp_server_stats_repository.clone(), diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index ab5eb3f09..cfc252e34 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -14,7 +14,7 @@ const TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL: &str = "torrent_repository_torren const TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL: &str = "torrent_repository_torrents_removed_total"; const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; -pub const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; +const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; const TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL: &str = "torrent_repository_torrents_inactive_total"; // Peers metrics diff --git a/packages/tracker-client/src/http/client/requests/announce.rs b/packages/tracker-client/src/http/client/requests/announce.rs index 29b5d1221..87bdbad52 100644 --- a/packages/tracker-client/src/http/client/requests/announce.rs +++ b/packages/tracker-client/src/http/client/requests/announce.rs @@ -102,7 +102,7 @@ impl QueryBuilder { peer_id: PeerId(*b"-qB00000000000000001").0, port: 17548, left: 0, - event: 
Some(Event::Completed), + event: Some(Event::Started), compact: Some(Compact::NotAccepted), }; Self { diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs index 0c421863f..ff8187379 100644 --- a/packages/tracker-core/src/statistics/mod.rs +++ b/packages/tracker-core/src/statistics/mod.rs @@ -10,7 +10,7 @@ use torrust_tracker_metrics::unit::Unit; // Torrent metrics -pub const TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = "tracker_core_persistent_torrents_downloads_total"; +const TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = "tracker_core_persistent_torrents_downloads_total"; #[must_use] pub fn describe_metrics() -> Metrics { diff --git a/packages/tracker-core/src/statistics/repository.rs b/packages/tracker-core/src/statistics/repository.rs index dd0ebebe7..21b1da7f2 100644 --- a/packages/tracker-core/src/statistics/repository.rs +++ b/packages/tracker-core/src/statistics/repository.rs @@ -4,10 +4,11 @@ use tokio::sync::{RwLock, RwLockReadGuard}; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; use torrust_tracker_metrics::metric_collection::Error; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use super::describe_metrics; use super::metrics::Metrics; +use super::{describe_metrics, TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL}; /// A repository for the torrent repository metrics. #[derive(Clone)] @@ -154,4 +155,22 @@ impl Repository { result } + + /// Get the total number of torrent downloads. + /// + /// The value is persisted in database if persistence for downloads metrics is enabled. 
+ pub async fn get_torrents_downloads_total(&self) -> u64 { + let metrics = self.get_metrics().await; + + let downloads = metrics.metric_collection.get_counter_value( + &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + &LabelSet::default(), + ); + + if let Some(downloads) = downloads { + downloads.value() + } else { + 0 + } + } } From 43c71793aaba1feba5d246c42db703b443721e60 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 12:15:26 +0100 Subject: [PATCH 102/247] refactor: [#1543] rename Metrics to ProtocolMetrics --- .../src/v1/context/stats/resources.rs | 4 ++-- .../rest-tracker-api-core/src/statistics/metrics.rs | 2 +- .../rest-tracker-api-core/src/statistics/services.rs | 12 ++++++------ 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs index 08f83026f..ece50383b 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs @@ -134,7 +134,7 @@ impl From for LabeledStats { #[cfg(test)] mod tests { - use torrust_rest_tracker_api_core::statistics::metrics::{Metrics, TorrentsMetrics}; + use torrust_rest_tracker_api_core::statistics::metrics::{ProtocolMetrics, TorrentsMetrics}; use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; use super::Stats; @@ -150,7 +150,7 @@ mod tests { total_incomplete: 3, total_torrents: 4 }, - protocol_metrics: Metrics { + protocol_metrics: ProtocolMetrics { // TCP tcp4_connections_handled: 5, tcp4_announces_handled: 6, diff --git a/packages/rest-tracker-api-core/src/statistics/metrics.rs b/packages/rest-tracker-api-core/src/statistics/metrics.rs index ca556becf..ecdecd130 100644 --- a/packages/rest-tracker-api-core/src/statistics/metrics.rs +++ b/packages/rest-tracker-api-core/src/statistics/metrics.rs @@ -36,7 +36,7 @@ impl From 
for TorrentsMetrics { /// These metrics are collected for each connection type: UDP and HTTP /// and also for each IP version used by the peers: IPv4 and IPv6. #[derive(Debug, PartialEq, Default)] -pub struct Metrics { +pub struct ProtocolMetrics { /// Total number of TCP (HTTP tracker) connections from IPv4 peers. /// Since the HTTP tracker spec does not require a handshake, this metric /// increases for every HTTP request. diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index d5b68c274..9a2eb3667 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -8,7 +8,7 @@ use torrust_tracker_metrics::metric_collection::MetricCollection; use torrust_udp_tracker_server::statistics as udp_server_statistics; use super::metrics::TorrentsMetrics; -use crate::statistics::metrics::Metrics; +use crate::statistics::metrics::ProtocolMetrics; /// All the metrics collected by the tracker. #[derive(Debug, PartialEq)] @@ -21,7 +21,7 @@ pub struct TrackerMetrics { /// Application level metrics. Usage statistics/metrics. /// /// Metrics about how the tracker is been used (number of udp announce requests, number of http scrape requests, etcetera) - pub protocol_metrics: Metrics, + pub protocol_metrics: ProtocolMetrics, } /// It returns all the [`TrackerMetrics`] @@ -56,7 +56,7 @@ async fn get_protocol_metrics( ban_service: Arc>, http_stats_repository: Arc, udp_server_stats_repository: Arc, -) -> Metrics { +) -> ProtocolMetrics { let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let http_stats = http_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; @@ -66,7 +66,7 @@ async fn get_protocol_metrics( // tracker, but we keep them for now. In new major versions we should remove // them. 
- Metrics { + ProtocolMetrics { // TCPv4 tcp4_connections_handled: http_stats.tcp4_announces_handled + http_stats.tcp4_scrapes_handled, tcp4_announces_handled: http_stats.tcp4_announces_handled, @@ -168,7 +168,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; - use crate::statistics::metrics::{Metrics, TorrentsMetrics}; + use crate::statistics::metrics::{ProtocolMetrics, TorrentsMetrics}; use crate::statistics::services::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Configuration { @@ -214,7 +214,7 @@ mod tests { tracker_metrics, TrackerMetrics { torrents_metrics: TorrentsMetrics::default(), - protocol_metrics: Metrics::default(), + protocol_metrics: ProtocolMetrics::default(), } ); } From 92242f8b54e7b0091b053a3ab8c110638b51a7a5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 12:21:08 +0100 Subject: [PATCH 103/247] fix: [#1543] Remove peerless torrents when it's enabled in the tracker policy They were not being removed when stats was enabled because the tracker was counting downloads only from the active swarms. Now the API exposed metric (global downloads) is not taken from the in-memory data structure unless stats persistence is disabled. In that case, the global total would be per session (since the tracker started), and reset when the tracker restarts. --- packages/torrent-repository/src/swarm.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 84e1f2da4..362fc6153 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -201,13 +201,7 @@ impl Swarm { /// Returns true if the swarm should be removed according to the retention /// policy.
fn should_be_removed(&self, policy: &TrackerPolicy) -> bool { - // If the policy is to remove peerless torrents and the swarm is empty (no peers), - (policy.remove_peerless_torrents && self.is_empty()) - // but not when the policy is to persist torrent stats and the - // torrent has been downloaded at least once. - // (because the only way to store the counter is to keep the swarm in memory. - // See https://github.com/torrust/torrust-tracker/issues/1502) - && !(policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0) + policy.remove_peerless_torrents && self.is_empty() } fn update_metadata_on_insert(&mut self, added_peer: &Arc) { From 55149bcf97ad261e0ef36334520ce4cc73082ecc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 10:16:12 +0100 Subject: [PATCH 104/247] refactor: [#1519] rename dir torrent-repository --- Cargo.toml | 2 +- packages/axum-http-tracker-server/Cargo.toml | 2 +- packages/axum-rest-tracker-api-server/Cargo.toml | 2 +- packages/http-tracker-core/Cargo.toml | 2 +- packages/rest-tracker-api-core/Cargo.toml | 2 +- .../.gitignore | 0 .../Cargo.toml | 0 .../README.md | 0 .../src/container.rs | 0 .../src/event.rs | 0 .../src/lib.rs | 0 .../src/statistics/activity_metrics_updater.rs | 0 .../src/statistics/event/handler.rs | 0 .../src/statistics/event/listener.rs | 0 .../src/statistics/event/mod.rs | 0 .../src/statistics/metrics.rs | 0 .../src/statistics/mod.rs | 0 .../src/statistics/repository.rs | 0 .../src/swarm.rs | 0 .../src/swarms.rs | 0 packages/torrent-repository-benchmarking/README.md | 2 +- packages/tracker-core/Cargo.toml | 2 +- packages/udp-tracker-core/Cargo.toml | 2 +- packages/udp-tracker-server/Cargo.toml | 2 +- 24 files changed, 9 insertions(+), 9 deletions(-) rename packages/{torrent-repository => swarm-coordination-registry}/.gitignore (100%) rename packages/{torrent-repository => swarm-coordination-registry}/Cargo.toml (100%) rename packages/{torrent-repository => 
swarm-coordination-registry}/README.md (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/container.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/event.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/lib.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/statistics/activity_metrics_updater.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/statistics/event/handler.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/statistics/event/listener.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/statistics/event/mod.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/statistics/metrics.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/statistics/mod.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/statistics/repository.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/swarm.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/swarms.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 219701d03..3e6e3e073 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,7 +55,7 @@ torrust-rest-tracker-api-core = { version = "3.0.0-develop", path = "packages/re torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = 
"packages/udp-tracker-server" } tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index 81831a614..51283ee01 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -33,7 +33,7 @@ torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" diff --git a/packages/axum-rest-tracker-api-server/Cargo.toml b/packages/axum-rest-tracker-api-server/Cargo.toml index 296f77d61..558dbf6c1 100644 --- a/packages/axum-rest-tracker-api-server/Cargo.toml +++ b/packages/axum-rest-tracker-api-server/Cargo.toml @@ -39,7 +39,7 @@ torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } tower = { version = "0", 
features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index 37b540e39..008aa92c6 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -28,7 +28,7 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configur torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" [dev-dependencies] diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index 8cfe601b2..9a086ad19 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -21,7 +21,7 @@ tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } [dev-dependencies] diff --git a/packages/torrent-repository/.gitignore b/packages/swarm-coordination-registry/.gitignore similarity index 100% rename from packages/torrent-repository/.gitignore 
rename to packages/swarm-coordination-registry/.gitignore diff --git a/packages/torrent-repository/Cargo.toml b/packages/swarm-coordination-registry/Cargo.toml similarity index 100% rename from packages/torrent-repository/Cargo.toml rename to packages/swarm-coordination-registry/Cargo.toml diff --git a/packages/torrent-repository/README.md b/packages/swarm-coordination-registry/README.md similarity index 100% rename from packages/torrent-repository/README.md rename to packages/swarm-coordination-registry/README.md diff --git a/packages/torrent-repository/src/container.rs b/packages/swarm-coordination-registry/src/container.rs similarity index 100% rename from packages/torrent-repository/src/container.rs rename to packages/swarm-coordination-registry/src/container.rs diff --git a/packages/torrent-repository/src/event.rs b/packages/swarm-coordination-registry/src/event.rs similarity index 100% rename from packages/torrent-repository/src/event.rs rename to packages/swarm-coordination-registry/src/event.rs diff --git a/packages/torrent-repository/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs similarity index 100% rename from packages/torrent-repository/src/lib.rs rename to packages/swarm-coordination-registry/src/lib.rs diff --git a/packages/torrent-repository/src/statistics/activity_metrics_updater.rs b/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs similarity index 100% rename from packages/torrent-repository/src/statistics/activity_metrics_updater.rs rename to packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/swarm-coordination-registry/src/statistics/event/handler.rs similarity index 100% rename from packages/torrent-repository/src/statistics/event/handler.rs rename to packages/swarm-coordination-registry/src/statistics/event/handler.rs diff --git 
a/packages/torrent-repository/src/statistics/event/listener.rs b/packages/swarm-coordination-registry/src/statistics/event/listener.rs similarity index 100% rename from packages/torrent-repository/src/statistics/event/listener.rs rename to packages/swarm-coordination-registry/src/statistics/event/listener.rs diff --git a/packages/torrent-repository/src/statistics/event/mod.rs b/packages/swarm-coordination-registry/src/statistics/event/mod.rs similarity index 100% rename from packages/torrent-repository/src/statistics/event/mod.rs rename to packages/swarm-coordination-registry/src/statistics/event/mod.rs diff --git a/packages/torrent-repository/src/statistics/metrics.rs b/packages/swarm-coordination-registry/src/statistics/metrics.rs similarity index 100% rename from packages/torrent-repository/src/statistics/metrics.rs rename to packages/swarm-coordination-registry/src/statistics/metrics.rs diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/swarm-coordination-registry/src/statistics/mod.rs similarity index 100% rename from packages/torrent-repository/src/statistics/mod.rs rename to packages/swarm-coordination-registry/src/statistics/mod.rs diff --git a/packages/torrent-repository/src/statistics/repository.rs b/packages/swarm-coordination-registry/src/statistics/repository.rs similarity index 100% rename from packages/torrent-repository/src/statistics/repository.rs rename to packages/swarm-coordination-registry/src/statistics/repository.rs diff --git a/packages/torrent-repository/src/swarm.rs b/packages/swarm-coordination-registry/src/swarm.rs similarity index 100% rename from packages/torrent-repository/src/swarm.rs rename to packages/swarm-coordination-registry/src/swarm.rs diff --git a/packages/torrent-repository/src/swarms.rs b/packages/swarm-coordination-registry/src/swarms.rs similarity index 100% rename from packages/torrent-repository/src/swarms.rs rename to packages/swarm-coordination-registry/src/swarms.rs diff --git 
a/packages/torrent-repository-benchmarking/README.md b/packages/torrent-repository-benchmarking/README.md index f248ca0da..a0556a58f 100644 --- a/packages/torrent-repository-benchmarking/README.md +++ b/packages/torrent-repository-benchmarking/README.md @@ -1,4 +1,4 @@ -# Torrust Tracker Torrent Repository Benchmarking +# Torrust Tracker Swarm Coordination Registry Benchmarking A library to runt benchmarking for different implementations of a repository of torrents files and their peers. Torrent repositories are used by the [Torrust Tracker](https://github.com/torrust/torrust-tracker). diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index a2d08dfa0..8c9bf7769 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -33,7 +33,7 @@ torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" [dev-dependencies] diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index 9a27ec826..2933a7e70 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -33,7 +33,7 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configur torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = 
"../torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" zerocopy = "0.7" diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index a0c129acb..396dc0805 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -33,7 +33,7 @@ torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" url = { version = "2", features = ["serde"] } uuid = { version = "1", features = ["v4"] } From 2b7a25163a6a0d21aa0defe9e2999be1c5105ae0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 10:42:57 +0100 Subject: [PATCH 105/247] refactor: [#1519] rename crate torrust-tracker-torrent-repository to torrust-tracker-swarm-coordination-registry --- .github/workflows/deployment.yaml | 2 +- Cargo.lock | 40 +++++++++---------- Cargo.toml | 2 +- packages/axum-http-tracker-server/Cargo.toml | 2 +- .../src/environment.rs | 2 +- .../axum-http-tracker-server/src/server.rs | 2 +- .../axum-rest-tracker-api-server/Cargo.toml | 2 +- .../src/environment.rs | 2 +- .../src/v1/context/stats/handlers.rs | 2 +- packages/http-tracker-core/Cargo.toml | 2 +- packages/http-tracker-core/src/container.rs | 2 +- packages/rest-tracker-api-core/Cargo.toml | 2 +- .../rest-tracker-api-core/src/container.rs | 2 +- .../src/statistics/services.rs | 4 +- .../swarm-coordination-registry/Cargo.toml | 2 +- packages/tracker-core/Cargo.toml | 2 +- 
packages/tracker-core/src/container.rs | 2 +- .../src/statistics/event/handler.rs | 2 +- .../src/statistics/event/listener.rs | 2 +- packages/tracker-core/src/torrent/manager.rs | 2 +- .../src/torrent/repository/in_memory.rs | 2 +- .../tracker-core/tests/common/test_env.rs | 6 +-- packages/udp-tracker-core/Cargo.toml | 2 +- packages/udp-tracker-core/src/container.rs | 2 +- packages/udp-tracker-server/Cargo.toml | 2 +- .../udp-tracker-server/src/environment.rs | 2 +- .../jobs/activity_metrics_updater.rs | 2 +- src/bootstrap/jobs/torrent_repository.rs | 2 +- src/container.rs | 2 +- 29 files changed, 50 insertions(+), 52 deletions(-) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index d62b4bbcc..4e8fd579b 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -77,7 +77,7 @@ jobs: cargo publish -p torrust-tracker-located-error cargo publish -p torrust-tracker-metrics cargo publish -p torrust-tracker-primitives + cargo publish -p torrust-tracker-swarm-coordination-registry cargo publish -p torrust-tracker-test-helpers cargo publish -p torrust-tracker-torrent-benchmarking - cargo publish -p torrust-tracker-torrent-repository cargo publish -p torrust-udp-tracker-server diff --git a/Cargo.lock b/Cargo.lock index 009b1e458..ecf178a59 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -592,8 +592,8 @@ dependencies = [ "torrust-tracker-events", "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "tracing", ] @@ -680,8 +680,8 @@ dependencies = [ "torrust-tracker-located-error", "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "tracing", "url", ] @@ -710,8 +710,8 @@ dependencies = [ "torrust-tracker-events", "torrust-tracker-metrics", "torrust-tracker-primitives", 
+ "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "tracing", "zerocopy 0.7.35", ] @@ -4555,8 +4555,8 @@ dependencies = [ "torrust-tracker-configuration", "torrust-tracker-events", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "tower", "tower-http", "tracing", @@ -4595,8 +4595,8 @@ dependencies = [ "torrust-tracker-configuration", "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", "tower", "tower-http", @@ -4649,8 +4649,8 @@ dependencies = [ "torrust-tracker-events", "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", ] @@ -4697,8 +4697,8 @@ dependencies = [ "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", "tracing", "tracing-subscriber", @@ -4819,17 +4819,7 @@ dependencies = [ ] [[package]] -name = "torrust-tracker-test-helpers" -version = "3.0.0-develop" -dependencies = [ - "rand 0.9.1", - "torrust-tracker-configuration", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "torrust-tracker-torrent-repository" +name = "torrust-tracker-swarm-coordination-registry" version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", @@ -4840,7 +4830,7 @@ dependencies = [ "crossbeam-skiplist", "futures", "mockall", - "rand 0.9.1", + "rand 0.8.5", "rstest", "serde", "thiserror 2.0.12", @@ -4854,6 +4844,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "torrust-tracker-test-helpers" +version = 
"3.0.0-develop" +dependencies = [ + "rand 0.9.1", + "torrust-tracker-configuration", + "tracing", + "tracing-subscriber", +] + [[package]] name = "torrust-tracker-torrent-repository-benchmarking" version = "3.0.0-develop" @@ -4900,8 +4900,8 @@ dependencies = [ "torrust-tracker-located-error", "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "tracing", "url", "uuid", diff --git a/Cargo.toml b/Cargo.toml index 3e6e3e073..976176155 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,7 +55,7 @@ torrust-rest-tracker-api-core = { version = "3.0.0-develop", path = "packages/re torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "packages/swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "packages/udp-tracker-server" } tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index 51283ee01..fa195489c 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -33,7 +33,7 @@ torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = 
"3.0.0-develop", path = "../swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 0c1431db5..54c6b7767 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -10,7 +10,7 @@ use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use crate::server::{HttpServer, Launcher, Running, Stopped}; diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index f7d1ed7ea..b8ece8086 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -259,8 +259,8 @@ mod tests { use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; + use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::server::{HttpServer, Launcher}; diff --git a/packages/axum-rest-tracker-api-server/Cargo.toml b/packages/axum-rest-tracker-api-server/Cargo.toml index 558dbf6c1..9493b8693 100644 --- a/packages/axum-rest-tracker-api-server/Cargo.toml +++ 
b/packages/axum-rest-tracker-api-server/Cargo.toml @@ -39,7 +39,7 @@ torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index be93a8723..6be4cc53c 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -12,7 +12,7 @@ use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use crate::server::{ApiServer, Launcher, Running, Stopped}; diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 47bb5ad16..b907b861a 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ 
b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -77,7 +77,7 @@ pub async fn get_metrics_handler( State(state): State<( Arc, Arc>, - Arc, + Arc, Arc, Arc, Arc, diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index 008aa92c6..45af59baa 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -28,7 +28,7 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configur torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" [dev-dependencies] diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index f063c0061..35f75e1fe 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_tracker_configuration::{Core, HttpTracker}; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index 9a086ad19..cc8eda903 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -21,7 +21,7 @@ tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal torrust-tracker-configuration = { version = 
"3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } [dev-dependencies] diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index 1c4a08e26..f76c2ece3 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -7,7 +7,7 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; pub struct TrackerHttpApiCoreContainer { diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 9a2eb3667..56536a02f 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -113,7 +113,7 @@ pub struct TrackerLabeledMetrics { pub async fn get_labeled_metrics( in_memory_torrent_repository: Arc, ban_service: Arc>, - swarms_stats_repository: Arc, + swarms_stats_repository: Arc, tracker_core_stats_repository: Arc, http_stats_repository: Arc, udp_stats_repository: Arc, @@ -165,8 +165,8 @@ mod tests { use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; use 
torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use torrust_tracker_test_helpers::configuration; - use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::statistics::metrics::{ProtocolMetrics, TorrentsMetrics}; use crate::statistics::services::{get_metrics, TrackerMetrics}; diff --git a/packages/swarm-coordination-registry/Cargo.toml b/packages/swarm-coordination-registry/Cargo.toml index 510a59e9d..074562a47 100644 --- a/packages/swarm-coordination-registry/Cargo.toml +++ b/packages/swarm-coordination-registry/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "A library that provides a repository of torrents files and their peers." keywords = ["library", "repository", "torrents"] -name = "torrust-tracker-torrent-repository" +name = "torrust-tracker-swarm-coordination-registry" readme = "README.md" authors.workspace = true diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index 8c9bf7769..f04a3b89b 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -33,7 +33,7 @@ torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" [dev-dependencies] diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 02af67118..949761553 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -1,7 +1,7 @@ use 
std::sync::Arc; use torrust_tracker_configuration::Core; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use crate::announce_handler::AnnounceHandler; use crate::authentication::handler::KeysHandler; diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index 0909dc184..9a5182f25 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use torrust_tracker_torrent_repository::event::Event; +use torrust_tracker_swarm_coordination_registry::event::Event; use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::statistics::repository::Repository; diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index 2702aa858..d3beaf41f 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; -use torrust_tracker_torrent_repository::event::receiver::Receiver; +use torrust_tracker_swarm_coordination_registry::event::receiver::Receiver; use super::handler::handle_event; use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 766fa5c4a..cbdf01193 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -148,7 +148,7 @@ mod tests { use 
std::sync::Arc; use torrust_tracker_configuration::Core; - use torrust_tracker_torrent_repository::Swarms; + use torrust_tracker_swarm_coordination_registry::Swarms; use super::{DatabaseDownloadsMetricRepository, TorrentsManager}; use crate::databases::setup::initialize_database; diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index cc873726d..47b34ad26 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; -use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; +use torrust_tracker_swarm_coordination_registry::{SwarmHandle, Swarms}; /// In-memory repository for torrent entries. 
/// diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 2aafbbbad..64bdcaad8 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -14,7 +14,7 @@ use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; pub struct TestEnv { pub torrent_repository_container: Arc, @@ -67,11 +67,10 @@ impl TestEnv { async fn run_jobs(&self) { let mut jobs = vec![]; - let job = torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( + let job = torrust_tracker_swarm_coordination_registry::statistics::event::listener::run_event_listener( self.torrent_repository_container.event_bus.receiver(), &self.torrent_repository_container.stats_repository, ); - jobs.push(job); let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( @@ -83,7 +82,6 @@ impl TestEnv { .tracker_policy .persistent_torrent_completed_stat, ); - jobs.push(job); // Give the event listeners some time to start diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index 2933a7e70..290c5fbfd 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -33,7 +33,7 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configur torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path 
= "../swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" zerocopy = "0.7" diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 07a8a09ef..c4be395fc 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, UdpTracker}; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index 396dc0805..72fa520ba 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -33,7 +33,7 @@ torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" url = { version = "2", features = ["serde"] } uuid = { version = "1", features = ["v4"] } diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 94a166e4e..3f479a02d 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -8,7 +8,7 @@ use 
tokio::task::JoinHandle; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_primitives::peer; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use crate::container::UdpTrackerServerContainer; use crate::server::spawner::Spawner; diff --git a/src/bootstrap/jobs/activity_metrics_updater.rs b/src/bootstrap/jobs/activity_metrics_updater.rs index 7411c05cf..9813fed65 100644 --- a/src/bootstrap/jobs/activity_metrics_updater.rs +++ b/src/bootstrap/jobs/activity_metrics_updater.rs @@ -11,7 +11,7 @@ use crate::CurrentClock; #[must_use] pub fn start_job(config: &Configuration, app_container: &Arc) -> JoinHandle<()> { - torrust_tracker_torrent_repository::statistics::activity_metrics_updater::start_job( + torrust_tracker_swarm_coordination_registry::statistics::activity_metrics_updater::start_job( &app_container.torrent_repository_container.swarms.clone(), &app_container.torrent_repository_container.stats_repository.clone(), peer_inactivity_cutoff_timestamp(config.core.tracker_policy.max_peer_timeout), diff --git a/src/bootstrap/jobs/torrent_repository.rs b/src/bootstrap/jobs/torrent_repository.rs index ea0d215ee..c64917ea6 100644 --- a/src/bootstrap/jobs/torrent_repository.rs +++ b/src/bootstrap/jobs/torrent_repository.rs @@ -7,7 +7,7 @@ use crate::container::AppContainer; pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { if config.core.tracker_usage_statistics { - let job = torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( + let job = torrust_tracker_swarm_coordination_registry::statistics::event::listener::run_event_listener( app_container.torrent_repository_container.event_bus.receiver(), &app_container.torrent_repository_container.stats_repository, ); diff --git a/src/container.rs 
b/src/container.rs index 98c455780..bb5873fb2 100644 --- a/src/container.rs +++ b/src/container.rs @@ -9,7 +9,7 @@ use bittorrent_udp_tracker_core::{self}; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{Configuration, HttpApi}; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; From 2768306a8b5db288f27dedac6ce59a11efc61bcb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 10:50:56 +0100 Subject: [PATCH 106/247] refactor: [#1519] rename Swarm to Coordinator --- .../swarm-coordination-registry/src/lib.rs | 4 +- .../swarm-coordination-registry/src/swarm.rs | 102 +++++++++--------- .../swarm-coordination-registry/src/swarms.rs | 12 +-- 3 files changed, 59 insertions(+), 59 deletions(-) diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index 3adf2f18d..c93f553fa 100644 --- a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -10,8 +10,8 @@ use tokio::sync::Mutex; use torrust_tracker_clock::clock; pub type Swarms = swarms::Swarms; -pub type SwarmHandle = Arc>; -pub type Swarm = swarm::Swarm; +pub type SwarmHandle = Arc>; +pub type Coordinator = swarm::Coordinator; /// Working version, for production. 
#[cfg(not(test))] diff --git a/packages/swarm-coordination-registry/src/swarm.rs b/packages/swarm-coordination-registry/src/swarm.rs index 362fc6153..81e454d8b 100644 --- a/packages/swarm-coordination-registry/src/swarm.rs +++ b/packages/swarm-coordination-registry/src/swarm.rs @@ -15,14 +15,14 @@ use crate::event::sender::Sender; use crate::event::Event; #[derive(Clone)] -pub struct Swarm { +pub struct Coordinator { info_hash: InfoHash, peers: BTreeMap>, metadata: SwarmMetadata, event_sender: Sender, } -impl Swarm { +impl Coordinator { #[must_use] pub fn new(info_hash: &InfoHash, downloaded: u32, event_sender: Sender) -> Self { Self { @@ -326,26 +326,26 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarm::Swarm; + use crate::swarm::Coordinator; use crate::tests::sample_info_hash; #[test] fn it_should_be_empty_when_no_peers_have_been_inserted() { - let swarm = Swarm::new(&sample_info_hash(), 0, None); + let swarm = Coordinator::new(&sample_info_hash(), 0, None); assert!(swarm.is_empty()); } #[test] fn it_should_have_zero_length_when_no_peers_have_been_inserted() { - let swarm = Swarm::new(&sample_info_hash(), 0, None); + let swarm = Coordinator::new(&sample_info_hash(), 0, None); assert_eq!(swarm.len(), 0); } #[tokio::test] async fn it_should_allow_inserting_a_new_peer() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -354,7 +354,7 @@ mod tests { #[tokio::test] async fn it_should_allow_updating_a_preexisting_peer() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -365,7 +365,7 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_all_peers() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let 
mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -376,7 +376,7 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_one_peer_by_id() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -387,7 +387,7 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -398,7 +398,7 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_peers_after_removing_one() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -411,7 +411,7 @@ mod tests { #[tokio::test] async fn it_should_allow_removing_an_existing_peer() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -425,7 +425,7 @@ mod tests { #[tokio::test] async fn it_should_allow_removing_a_non_existing_peer() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -434,7 +434,7 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) @@ -453,7 +453,7 @@ mod tests { #[tokio::test] async fn it_should_count_inactive_peers() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut 
swarm = Coordinator::new(&sample_info_hash(), 0, None); let one_second = DurationSinceUnixEpoch::new(1, 0); @@ -469,7 +469,7 @@ mod tests { #[tokio::test] async fn it_should_remove_inactive_peers() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let one_second = DurationSinceUnixEpoch::new(1, 0); @@ -486,7 +486,7 @@ mod tests { #[tokio::test] async fn it_should_not_remove_active_peers() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let one_second = DurationSinceUnixEpoch::new(1, 0); @@ -507,20 +507,20 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use crate::tests::sample_info_hash; - use crate::Swarm; + use crate::Coordinator; - fn empty_swarm() -> Swarm { - Swarm::new(&sample_info_hash(), 0, None) + fn empty_swarm() -> Coordinator { + Coordinator::new(&sample_info_hash(), 0, None) } - async fn not_empty_swarm() -> Swarm { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + async fn not_empty_swarm() -> Coordinator { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); swarm.upsert_peer(PeerBuilder::default().build().into()).await; swarm } - async fn not_empty_swarm_with_downloads() -> Swarm { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + async fn not_empty_swarm_with_downloads() -> Coordinator { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::leecher().build(); @@ -602,7 +602,7 @@ mod tests { #[tokio::test] async fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer1 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) @@ -619,7 +619,7 @@ mod tests { #[tokio::test] async fn 
it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); // When that happens the peer ID will be changed in the swarm. // In practice, it's like if the peer had changed its ID. @@ -641,7 +641,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_swarm_metadata() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); @@ -661,7 +661,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_number_of_seeders_in_the_list() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); @@ -676,7 +676,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_number_of_leechers_in_the_list() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); @@ -691,7 +691,7 @@ mod tests { #[tokio::test] async fn it_should_be_a_peerless_swarm_when_it_does_not_contain_any_peers() { - let swarm = Swarm::new(&sample_info_hash(), 0, None); + let swarm = Coordinator::new(&sample_info_hash(), 0, None); assert!(swarm.is_peerless()); } @@ -700,12 +700,12 @@ mod tests { mod when_a_new_peer_is_added { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Swarm; + use crate::swarm::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = 
Coordinator::new(&sample_info_hash(), 0, None); let leechers = swarm.metadata().leechers(); @@ -718,7 +718,7 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let seeders = swarm.metadata().seeders(); @@ -732,7 +732,7 @@ mod tests { #[tokio::test] async fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( ) { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let downloads = swarm.metadata().downloads(); @@ -747,12 +747,12 @@ mod tests { mod when_a_peer_is_removed { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Swarm; + use crate::swarm::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let leecher = PeerBuilder::leecher().build(); @@ -767,7 +767,7 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let seeder = PeerBuilder::seeder().build(); @@ -786,12 +786,12 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Swarm; + use crate::swarm::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let leecher = 
PeerBuilder::leecher().build(); @@ -806,7 +806,7 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let seeder = PeerBuilder::seeder().build(); @@ -824,12 +824,12 @@ mod tests { use aquatic_udp_protocol::NumberOfBytes; use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Swarm; + use crate::swarm::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::leecher().build(); @@ -848,7 +848,7 @@ mod tests { #[tokio::test] async fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::seeder().build(); @@ -867,7 +867,7 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::leecher().build(); @@ -884,7 +884,7 @@ mod tests { #[tokio::test] async fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::leecher().build(); @@ -913,7 +913,7 @@ mod tests { use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; use crate::event::Event; - use 
crate::swarm::Swarm; + use crate::swarm::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] @@ -925,7 +925,7 @@ mod tests { expect_event_sequence(&mut event_sender_mock, vec![Event::PeerAdded { info_hash, peer }]); - let mut swarm = Swarm::new(&sample_info_hash(), 0, Some(Arc::new(event_sender_mock))); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, Some(Arc::new(event_sender_mock))); swarm.upsert_peer(peer.into()).await; } @@ -942,7 +942,7 @@ mod tests { vec![Event::PeerAdded { info_hash, peer }, Event::PeerRemoved { info_hash, peer }], ); - let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + let mut swarm = Coordinator::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer swarm.upsert_peer(peer.into()).await; @@ -962,7 +962,7 @@ mod tests { vec![Event::PeerAdded { info_hash, peer }, Event::PeerRemoved { info_hash, peer }], ); - let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + let mut swarm = Coordinator::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer swarm.upsert_peer(peer.into()).await; @@ -992,7 +992,7 @@ mod tests { ], ); - let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + let mut swarm = Coordinator::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer swarm.upsert_peer(peer.into()).await; @@ -1028,7 +1028,7 @@ mod tests { ], ); - let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + let mut swarm = Coordinator::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer swarm.upsert_peer(started_peer.into()).await; diff --git a/packages/swarm-coordination-registry/src/swarms.rs b/packages/swarm-coordination-registry/src/swarms.rs index 8e7bc24de..12fe2190d 100644 --- a/packages/swarm-coordination-registry/src/swarms.rs +++ b/packages/swarm-coordination-registry/src/swarms.rs @@ -11,7 +11,7 @@ use torrust_tracker_primitives::{peer, 
DurationSinceUnixEpoch, NumberOfDownloads use crate::event::sender::Sender; use crate::event::Event; -use crate::swarm::Swarm; +use crate::swarm::Coordinator; use crate::SwarmHandle; #[derive(Default)] @@ -60,7 +60,7 @@ impl Swarms { let number_of_downloads = opt_persistent_torrent.unwrap_or_default(); let new_swarm_handle = - SwarmHandle::new(Swarm::new(info_hash, number_of_downloads, self.event_sender.clone()).into()); + SwarmHandle::new(Coordinator::new(info_hash, number_of_downloads, self.event_sender.clone()).into()); let new_swarm_handle = self.swarms.get_or_insert(*info_hash, new_swarm_handle); @@ -86,7 +86,7 @@ impl Swarms { } /// Inserts a new swarm. Only used for testing purposes. - pub fn insert(&self, info_hash: &InfoHash, swarm: Swarm) { + pub fn insert(&self, info_hash: &InfoHash, swarm: Coordinator) { // code-review: swarms builder? or constructor from vec? // It's only used for testing purposes. It allows to pre-define the // initial state of the swarm without having to go through the upsert @@ -366,7 +366,7 @@ impl Swarms { continue; } - let entry = SwarmHandle::new(Swarm::new(info_hash, *completed, self.event_sender.clone()).into()); + let entry = SwarmHandle::new(Coordinator::new(info_hash, *completed, self.event_sender.clone()).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. @@ -853,7 +853,7 @@ mod tests { use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; - use crate::{Swarm, SwarmHandle}; + use crate::{Coordinator, SwarmHandle}; /// `TorrentEntry` data is not directly accessible. It's only /// accessible through the trait methods. 
We need this temporary @@ -871,7 +871,7 @@ mod tests { } #[allow(clippy::from_over_into)] - impl Into for Swarm { + impl Into for Coordinator { fn into(self) -> TorrentEntryInfo { let torrent_entry_info = TorrentEntryInfo { swarm_metadata: self.metadata(), From ba37801d3c62b2b2c4ad1df1785609e6543d7d61 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 10:55:08 +0100 Subject: [PATCH 107/247] refactor: [#1519] rename Swarms to Registry --- .../swarm-coordination-registry/src/lib.rs | 2 +- .../swarm-coordination-registry/src/swarms.rs | 110 +++++++++--------- 2 files changed, 56 insertions(+), 56 deletions(-) diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index c93f553fa..f3926331a 100644 --- a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -9,7 +9,7 @@ use std::sync::Arc; use tokio::sync::Mutex; use torrust_tracker_clock::clock; -pub type Swarms = swarms::Swarms; +pub type Swarms = swarms::Registry; pub type SwarmHandle = Arc>; pub type Coordinator = swarm::Coordinator; diff --git a/packages/swarm-coordination-registry/src/swarms.rs b/packages/swarm-coordination-registry/src/swarms.rs index 12fe2190d..c14cb66b7 100644 --- a/packages/swarm-coordination-registry/src/swarms.rs +++ b/packages/swarm-coordination-registry/src/swarms.rs @@ -15,12 +15,12 @@ use crate::swarm::Coordinator; use crate::SwarmHandle; #[derive(Default)] -pub struct Swarms { +pub struct Registry { swarms: SkipMap, event_sender: Sender, } -impl Swarms { +impl Registry { #[must_use] pub fn new(event_sender: Sender) -> Self { Self { @@ -510,7 +510,7 @@ mod tests { use aquatic_udp_protocol::PeerId; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; /// It generates a peer id from a number where the number is the last @@ -543,13 +543,13 @@ mod tests { #[tokio::test] async fn 
it_should_return_zero_length_when_it_has_no_swarms() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); assert_eq!(swarms.len(), 0); } #[tokio::test] async fn it_should_return_the_length_when_it_has_swarms() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); @@ -558,7 +558,7 @@ mod tests { #[tokio::test] async fn it_should_be_empty_when_it_has_no_swarms() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); assert!(swarms.is_empty()); let info_hash = sample_info_hash(); @@ -569,7 +569,7 @@ mod tests { #[tokio::test] async fn it_should_not_be_empty_when_it_has_at_least_one_swarm() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); @@ -581,12 +581,12 @@ mod tests { use std::sync::Arc; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn it_should_add_the_first_peer_to_the_torrent_peer_list() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); @@ -597,7 +597,7 @@ mod tests { #[tokio::test] async fn it_should_allow_adding_the_same_peer_twice_to_the_torrent_peer_list() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); @@ -618,12 +618,12 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::swarms::tests::the_swarm_repository::numeric_peer_id; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn 
it_should_return_the_peers_for_a_given_torrent() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -637,7 +637,7 @@ mod tests { #[tokio::test] async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let peers = swarms.get_swarm_peers(&sample_info_hash(), 74).await.unwrap(); @@ -646,7 +646,7 @@ mod tests { #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); @@ -680,12 +680,12 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::swarms::tests::the_swarm_repository::numeric_peer_id; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn it_should_return_an_empty_peer_list_for_a_non_existing_torrent() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let peers = swarms .get_peers_peers_excluding(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT) @@ -697,7 +697,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -714,7 +714,7 @@ mod tests { #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); @@ -757,12 +757,12 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use 
crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn it_should_remove_a_torrent_entry() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); @@ -774,7 +774,7 @@ mod tests { #[tokio::test] async fn it_should_count_inactive_peers() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let mut peer = sample_peer(); @@ -790,7 +790,7 @@ mod tests { #[tokio::test] async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let mut peer = sample_peer(); @@ -811,8 +811,8 @@ mod tests { .contains(&Arc::new(peer))); } - async fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { - let swarms = Arc::new(Swarms::default()); + async fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { + let swarms = Arc::new(Registry::default()); // Insert a sample peer for the torrent to force adding the torrent entry let mut peer = sample_peer(); @@ -851,7 +851,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; use crate::{Coordinator, SwarmHandle}; @@ -884,7 +884,7 @@ mod tests { #[tokio::test] async fn it_should_return_one_torrent_entry_by_infohash() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -915,12 +915,12 @@ mod tests { use 
crate::swarms::tests::the_swarm_repository::returning_torrent_entries::{ torrent_entry_info, TorrentEntryInfo, }; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn without_pagination() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -955,7 +955,7 @@ mod tests { use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::{ torrent_entry_info, TorrentEntryInfo, }; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, sample_peer_one, sample_peer_two, @@ -963,7 +963,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_first_page() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); @@ -998,7 +998,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_second_page() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); @@ -1033,7 +1033,7 @@ mod tests { #[tokio::test] async fn it_should_allow_changing_the_page_size() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); @@ -1061,14 +1061,14 @@ mod tests { use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{complete_peer, leecher, sample_info_hash, seeder}; // todo: refactor to use test parametrization #[tokio::test] async fn 
it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); @@ -1085,7 +1085,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); swarms .handle_announcement(&sample_info_hash(), &leecher(), None) @@ -1107,7 +1107,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); swarms .handle_announcement(&sample_info_hash(), &seeder(), None) @@ -1129,7 +1129,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); swarms .handle_announcement(&sample_info_hash(), &complete_peer(), None) @@ -1151,7 +1151,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_are_multiple_torrents() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let start_time = std::time::Instant::now(); for i in 0..1_000_000 { @@ -1183,12 +1183,12 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn no_peerless_torrents() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); assert_eq!(swarms.count_peerless_torrents().await.unwrap(), 0); } @@ -1197,7 +1197,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let swarms = Arc::new(Swarms::default()); + let swarms = 
Arc::new(Registry::default()); swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); @@ -1210,12 +1210,12 @@ mod tests { mod it_should_count_peers { use std::sync::Arc; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn no_peers() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); assert_eq!(swarms.count_peers().await.unwrap(), 0); } @@ -1224,7 +1224,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); assert_eq!(swarms.count_peers().await.unwrap(), 1); @@ -1238,12 +1238,12 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{leecher, sample_info_hash}; #[tokio::test] async fn it_should_get_swarm_metadata_for_an_existing_torrent() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let infohash = sample_info_hash(); @@ -1263,7 +1263,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let swarm_metadata = swarms.get_swarm_metadata_or_default(&sample_info_hash()).await.unwrap(); @@ -1277,12 +1277,12 @@ mod tests { use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{leecher, sample_info_hash}; #[tokio::test] async fn it_should_allow_importing_persisted_torrent_entries() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let infohash = sample_info_hash(); @@ -1302,7 
+1302,7 @@ mod tests { async fn it_should_allow_overwriting_a_previously_imported_persisted_torrent() { // code-review: do we want to allow this? - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let infohash = sample_info_hash(); @@ -1321,7 +1321,7 @@ mod tests { #[tokio::test] async fn it_should_now_allow_importing_a_persisted_torrent_if_it_already_exists() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let infohash = sample_info_hash(); @@ -1353,7 +1353,7 @@ mod tests { use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; use crate::event::Event; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::sample_info_hash; #[tokio::test] @@ -1374,7 +1374,7 @@ mod tests { ], ); - let swarms = Swarms::new(Some(Arc::new(event_sender_mock))); + let swarms = Registry::new(Some(Arc::new(event_sender_mock))); swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); } @@ -1398,7 +1398,7 @@ mod tests { ], ); - let swarms = Swarms::new(Some(Arc::new(event_sender_mock))); + let swarms = Registry::new(Some(Arc::new(event_sender_mock))); swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); @@ -1425,7 +1425,7 @@ mod tests { ], ); - let swarms = Swarms::new(Some(Arc::new(event_sender_mock))); + let swarms = Registry::new(Some(Arc::new(event_sender_mock))); // Add the new torrent swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); From 63f04e57ffbf27644692fd0fb4b7527415188f4c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 11:04:27 +0100 Subject: [PATCH 108/247] refactor: [#1519] extract mod coordinator --- packages/swarm-coordination-registry/src/lib.rs | 2 +- .../src/{swarm.rs => swarm/coordinator.rs} | 16 ++++++++-------- .../swarm-coordination-registry/src/swarm/mod.rs | 1 + .../swarm-coordination-registry/src/swarms.rs | 2 +- 4 files changed, 11 insertions(+), 10 deletions(-) rename 
packages/swarm-coordination-registry/src/{swarm.rs => swarm/coordinator.rs} (98%) create mode 100644 packages/swarm-coordination-registry/src/swarm/mod.rs diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index f3926331a..2e591f41c 100644 --- a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -11,7 +11,7 @@ use torrust_tracker_clock::clock; pub type Swarms = swarms::Registry; pub type SwarmHandle = Arc>; -pub type Coordinator = swarm::Coordinator; +pub type Coordinator = swarm::coordinator::Coordinator; /// Working version, for production. #[cfg(not(test))] diff --git a/packages/swarm-coordination-registry/src/swarm.rs b/packages/swarm-coordination-registry/src/swarm/coordinator.rs similarity index 98% rename from packages/swarm-coordination-registry/src/swarm.rs rename to packages/swarm-coordination-registry/src/swarm/coordinator.rs index 81e454d8b..1ddf3e60b 100644 --- a/packages/swarm-coordination-registry/src/swarm.rs +++ b/packages/swarm-coordination-registry/src/swarm/coordinator.rs @@ -326,7 +326,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarm::Coordinator; + use crate::swarm::coordinator::Coordinator; use crate::tests::sample_info_hash; #[test] @@ -553,7 +553,7 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; - use crate::swarm::tests::for_retaining_policy::{ + use crate::swarm::coordinator::tests::for_retaining_policy::{ empty_swarm, not_empty_swarm, not_empty_swarm_with_downloads, remove_peerless_torrents_policy, }; @@ -582,7 +582,7 @@ mod tests { mod when_removing_peerless_torrents_is_disabled { - use crate::swarm::tests::for_retaining_policy::{ + use crate::swarm::coordinator::tests::for_retaining_policy::{ don_not_remove_peerless_torrents_policy, empty_swarm, not_empty_swarm, }; @@ -700,7 +700,7 @@ mod tests { mod 
when_a_new_peer_is_added { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Coordinator; + use crate::swarm::coordinator::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] @@ -747,7 +747,7 @@ mod tests { mod when_a_peer_is_removed { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Coordinator; + use crate::swarm::coordinator::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] @@ -786,7 +786,7 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Coordinator; + use crate::swarm::coordinator::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] @@ -824,7 +824,7 @@ mod tests { use aquatic_udp_protocol::NumberOfBytes; use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Coordinator; + use crate::swarm::coordinator::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] @@ -913,7 +913,7 @@ mod tests { use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; use crate::event::Event; - use crate::swarm::Coordinator; + use crate::swarm::coordinator::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] diff --git a/packages/swarm-coordination-registry/src/swarm/mod.rs b/packages/swarm-coordination-registry/src/swarm/mod.rs new file mode 100644 index 000000000..115b2c7c9 --- /dev/null +++ b/packages/swarm-coordination-registry/src/swarm/mod.rs @@ -0,0 +1 @@ +pub mod coordinator; diff --git a/packages/swarm-coordination-registry/src/swarms.rs b/packages/swarm-coordination-registry/src/swarms.rs index c14cb66b7..158cc88c7 100644 --- a/packages/swarm-coordination-registry/src/swarms.rs +++ b/packages/swarm-coordination-registry/src/swarms.rs @@ -11,7 +11,7 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads use crate::event::sender::Sender; use crate::event::Event; -use crate::swarm::Coordinator; +use crate::swarm::coordinator::Coordinator; use 
crate::SwarmHandle; #[derive(Default)] From cfc5b342180ccfa5e2388403ede3d7a33ac35af3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 11:05:25 +0100 Subject: [PATCH 109/247] refactor: [#1519] rename mod swarms to resgistry --- .../swarm-coordination-registry/src/lib.rs | 4 +-- .../src/{swarms.rs => registry.rs} | 36 +++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) rename packages/swarm-coordination-registry/src/{swarms.rs => registry.rs} (98%) diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index 2e591f41c..82a29b867 100644 --- a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -2,14 +2,14 @@ pub mod container; pub mod event; pub mod statistics; pub mod swarm; -pub mod swarms; +pub mod registry; use std::sync::Arc; use tokio::sync::Mutex; use torrust_tracker_clock::clock; -pub type Swarms = swarms::Registry; +pub type Swarms = registry::Registry; pub type SwarmHandle = Arc>; pub type Coordinator = swarm::coordinator::Coordinator; diff --git a/packages/swarm-coordination-registry/src/swarms.rs b/packages/swarm-coordination-registry/src/registry.rs similarity index 98% rename from packages/swarm-coordination-registry/src/swarms.rs rename to packages/swarm-coordination-registry/src/registry.rs index 158cc88c7..970b664ec 100644 --- a/packages/swarm-coordination-registry/src/swarms.rs +++ b/packages/swarm-coordination-registry/src/registry.rs @@ -510,7 +510,7 @@ mod tests { use aquatic_udp_protocol::PeerId; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; /// It generates a peer id from a number where the number is the last @@ -581,7 +581,7 @@ mod tests { use std::sync::Arc; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -617,8 +617,8 @@ mod tests { use 
torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::tests::the_swarm_repository::numeric_peer_id; - use crate::swarms::Registry; + use crate::registry::tests::the_swarm_repository::numeric_peer_id; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -679,8 +679,8 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::tests::the_swarm_repository::numeric_peer_id; - use crate::swarms::Registry; + use crate::registry::tests::the_swarm_repository::numeric_peer_id; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -757,7 +757,7 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -851,7 +851,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; use crate::{Coordinator, SwarmHandle}; @@ -912,10 +912,10 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::{ + use crate::registry::tests::the_swarm_repository::returning_torrent_entries::{ torrent_entry_info, TorrentEntryInfo, }; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -952,10 +952,10 @@ mod tests { use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::{ + use 
crate::registry::tests::the_swarm_repository::returning_torrent_entries::{ torrent_entry_info, TorrentEntryInfo, }; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, sample_peer_one, sample_peer_two, @@ -1061,7 +1061,7 @@ mod tests { use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{complete_peer, leecher, sample_info_hash, seeder}; // todo: refactor to use test parametrization @@ -1183,7 +1183,7 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -1210,7 +1210,7 @@ mod tests { mod it_should_count_peers { use std::sync::Arc; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -1238,7 +1238,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{leecher, sample_info_hash}; #[tokio::test] @@ -1277,7 +1277,7 @@ mod tests { use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{leecher, sample_info_hash}; #[tokio::test] @@ -1353,7 +1353,7 @@ mod tests { use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; use crate::event::Event; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::sample_info_hash; #[tokio::test] From 9146681a798ce22df46e069e4ea357e6c18ce8b7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 11:08:21 +0100 Subject: [PATCH 110/247] refactor: [#1519] move mod registry --- 
.../swarm-coordination-registry/src/lib.rs | 3 +- .../src/swarm/mod.rs | 1 + .../src/{ => swarm}/registry.rs | 36 +++++++++---------- 3 files changed, 20 insertions(+), 20 deletions(-) rename packages/swarm-coordination-registry/src/{ => swarm}/registry.rs (97%) diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index 82a29b867..bbeb5e924 100644 --- a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -2,14 +2,13 @@ pub mod container; pub mod event; pub mod statistics; pub mod swarm; -pub mod registry; use std::sync::Arc; use tokio::sync::Mutex; use torrust_tracker_clock::clock; -pub type Swarms = registry::Registry; +pub type Swarms = swarm::registry::Registry; pub type SwarmHandle = Arc>; pub type Coordinator = swarm::coordinator::Coordinator; diff --git a/packages/swarm-coordination-registry/src/swarm/mod.rs b/packages/swarm-coordination-registry/src/swarm/mod.rs index 115b2c7c9..925ae4948 100644 --- a/packages/swarm-coordination-registry/src/swarm/mod.rs +++ b/packages/swarm-coordination-registry/src/swarm/mod.rs @@ -1 +1,2 @@ pub mod coordinator; +pub mod registry; diff --git a/packages/swarm-coordination-registry/src/registry.rs b/packages/swarm-coordination-registry/src/swarm/registry.rs similarity index 97% rename from packages/swarm-coordination-registry/src/registry.rs rename to packages/swarm-coordination-registry/src/swarm/registry.rs index 970b664ec..30652537b 100644 --- a/packages/swarm-coordination-registry/src/registry.rs +++ b/packages/swarm-coordination-registry/src/swarm/registry.rs @@ -510,7 +510,7 @@ mod tests { use aquatic_udp_protocol::PeerId; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; /// It generates a peer id from a number where the number is the last @@ -581,7 +581,7 @@ mod tests { use std::sync::Arc; - use crate::registry::Registry; + use 
crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -617,8 +617,8 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::registry::tests::the_swarm_repository::numeric_peer_id; - use crate::registry::Registry; + use crate::swarm::registry::tests::the_swarm_repository::numeric_peer_id; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -679,8 +679,8 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::registry::tests::the_swarm_repository::numeric_peer_id; - use crate::registry::Registry; + use crate::swarm::registry::tests::the_swarm_repository::numeric_peer_id; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -757,7 +757,7 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -851,7 +851,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; use crate::{Coordinator, SwarmHandle}; @@ -912,10 +912,10 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::registry::tests::the_swarm_repository::returning_torrent_entries::{ + use crate::swarm::registry::tests::the_swarm_repository::returning_torrent_entries::{ torrent_entry_info, TorrentEntryInfo, }; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -952,10 +952,10 @@ mod tests { use 
torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::registry::tests::the_swarm_repository::returning_torrent_entries::{ + use crate::swarm::registry::tests::the_swarm_repository::returning_torrent_entries::{ torrent_entry_info, TorrentEntryInfo, }; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, sample_peer_one, sample_peer_two, @@ -1061,7 +1061,7 @@ mod tests { use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{complete_peer, leecher, sample_info_hash, seeder}; // todo: refactor to use test parametrization @@ -1183,7 +1183,7 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -1210,7 +1210,7 @@ mod tests { mod it_should_count_peers { use std::sync::Arc; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -1238,7 +1238,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{leecher, sample_info_hash}; #[tokio::test] @@ -1277,7 +1277,7 @@ mod tests { use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{leecher, sample_info_hash}; #[tokio::test] @@ -1353,7 +1353,7 @@ mod tests { use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; use crate::event::Event; - use crate::registry::Registry; + use 
crate::swarm::registry::Registry; use crate::tests::sample_info_hash; #[tokio::test] From 290c9eb491373ada84e9b3b2baa9bb596cbaffcc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 11:09:02 +0100 Subject: [PATCH 111/247] refactor: [#1519] rename Swarms to Registry --- packages/swarm-coordination-registry/src/container.rs | 6 +++--- packages/swarm-coordination-registry/src/lib.rs | 2 +- .../src/statistics/activity_metrics_updater.rs | 6 +++--- packages/tracker-core/src/torrent/manager.rs | 4 ++-- packages/tracker-core/src/torrent/repository/in_memory.rs | 6 +++--- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/packages/swarm-coordination-registry/src/container.rs b/packages/swarm-coordination-registry/src/container.rs index d185180b1..1b56b3d4b 100644 --- a/packages/swarm-coordination-registry/src/container.rs +++ b/packages/swarm-coordination-registry/src/container.rs @@ -6,10 +6,10 @@ use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::event::{self}; use crate::statistics::repository::Repository; -use crate::{statistics, Swarms}; +use crate::{statistics, Registry}; pub struct TorrentRepositoryContainer { - pub swarms: Arc, + pub swarms: Arc, pub event_bus: Arc, pub stats_event_sender: event::sender::Sender, pub stats_repository: Arc, @@ -26,7 +26,7 @@ impl TorrentRepositoryContainer { let stats_event_sender = event_bus.sender(); - let swarms = Arc::new(Swarms::new(stats_event_sender.clone())); + let swarms = Arc::new(Registry::new(stats_event_sender.clone())); Self { swarms, diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index bbeb5e924..0382c14fa 100644 --- a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use tokio::sync::Mutex; use torrust_tracker_clock::clock; -pub type Swarms = swarm::registry::Registry; +pub type Registry = 
swarm::registry::Registry; pub type SwarmHandle = Arc>; pub type Coordinator = swarm::coordinator::Coordinator; diff --git a/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs b/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs index 2dfa5fb4e..016e230ec 100644 --- a/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs +++ b/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs @@ -11,12 +11,12 @@ use tracing::instrument; use super::repository::Repository; use crate::statistics::{TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL, TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL}; -use crate::{CurrentClock, Swarms}; +use crate::{CurrentClock, Registry}; #[must_use] #[instrument(skip(swarms, stats_repository))] pub fn start_job( - swarms: &Arc, + swarms: &Arc, stats_repository: &Arc, inactivity_cutoff: DurationSinceUnixEpoch, ) -> JoinHandle<()> { @@ -51,7 +51,7 @@ pub fn start_job( async fn update_activity_metrics( interval_in_secs: u64, - swarms: &Arc, + swarms: &Arc, stats_repository: &Arc, inactivity_cutoff: DurationSinceUnixEpoch, ) { diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index cbdf01193..5acc27980 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -148,7 +148,7 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Core; - use torrust_tracker_swarm_coordination_registry::Swarms; + use torrust_tracker_swarm_coordination_registry::Registry; use super::{DatabaseDownloadsMetricRepository, TorrentsManager}; use crate::databases::setup::initialize_database; @@ -167,7 +167,7 @@ mod tests { } fn initialize_torrents_manager_with(config: Core) -> (Arc, Arc) { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms)); let 
database = initialize_database(&config); let database_persistent_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 47b34ad26..ead05a32d 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; -use torrust_tracker_swarm_coordination_registry::{SwarmHandle, Swarms}; +use torrust_tracker_swarm_coordination_registry::{Registry, SwarmHandle}; /// In-memory repository for torrent entries. /// @@ -21,12 +21,12 @@ use torrust_tracker_swarm_coordination_registry::{SwarmHandle, Swarms}; #[derive(Default)] pub struct InMemoryTorrentRepository { /// The underlying in-memory data structure that stores swarms data. 
- swarms: Arc, + swarms: Arc, } impl InMemoryTorrentRepository { #[must_use] - pub fn new(swarms: Arc) -> Self { + pub fn new(swarms: Arc) -> Self { Self { swarms } } From bbe974de3537246dad431826fd3dc8764dd44375 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 11:11:23 +0100 Subject: [PATCH 112/247] refactor: [#1519] rename SwarmHandle to CoordinatorHandle --- .../swarm-coordination-registry/src/lib.rs | 2 +- .../src/swarm/registry.rs | 18 +++++++++--------- .../src/torrent/repository/in_memory.rs | 6 +++--- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index 0382c14fa..fc7996817 100644 --- a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -9,7 +9,7 @@ use tokio::sync::Mutex; use torrust_tracker_clock::clock; pub type Registry = swarm::registry::Registry; -pub type SwarmHandle = Arc>; +pub type CoordinatorHandle = Arc>; pub type Coordinator = swarm::coordinator::Coordinator; /// Working version, for production. 
diff --git a/packages/swarm-coordination-registry/src/swarm/registry.rs b/packages/swarm-coordination-registry/src/swarm/registry.rs index 30652537b..c8e98f307 100644 --- a/packages/swarm-coordination-registry/src/swarm/registry.rs +++ b/packages/swarm-coordination-registry/src/swarm/registry.rs @@ -12,11 +12,11 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads use crate::event::sender::Sender; use crate::event::Event; use crate::swarm::coordinator::Coordinator; -use crate::SwarmHandle; +use crate::CoordinatorHandle; #[derive(Default)] pub struct Registry { - swarms: SkipMap, + swarms: SkipMap, event_sender: Sender, } @@ -60,7 +60,7 @@ impl Registry { let number_of_downloads = opt_persistent_torrent.unwrap_or_default(); let new_swarm_handle = - SwarmHandle::new(Coordinator::new(info_hash, number_of_downloads, self.event_sender.clone()).into()); + CoordinatorHandle::new(Coordinator::new(info_hash, number_of_downloads, self.event_sender.clone()).into()); let new_swarm_handle = self.swarms.get_or_insert(*info_hash, new_swarm_handle); @@ -107,7 +107,7 @@ impl Registry { /// /// An `Option` containing the removed torrent entry if it existed. #[must_use] - pub async fn remove(&self, key: &InfoHash) -> Option { + pub async fn remove(&self, key: &InfoHash) -> Option { let swarm_handle = self.swarms.remove(key).map(|entry| entry.value().clone()); if let Some(event_sender) = self.event_sender.as_deref() { @@ -123,7 +123,7 @@ impl Registry { /// /// An `Option` containing the tracked torrent handle if found. #[must_use] - pub fn get(&self, key: &InfoHash) -> Option { + pub fn get(&self, key: &InfoHash) -> Option { let maybe_entry = self.swarms.get(key); maybe_entry.map(|entry| entry.value().clone()) } @@ -138,7 +138,7 @@ impl Registry { /// /// A vector of `(InfoHash, TorrentEntry)` tuples. 
#[must_use] - pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, SwarmHandle)> { + pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, CoordinatorHandle)> { match pagination { Some(pagination) => self .swarms @@ -366,7 +366,7 @@ impl Registry { continue; } - let entry = SwarmHandle::new(Coordinator::new(info_hash, *completed, self.event_sender.clone()).into()); + let entry = CoordinatorHandle::new(Coordinator::new(info_hash, *completed, self.event_sender.clone()).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. @@ -853,7 +853,7 @@ mod tests { use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; - use crate::{Coordinator, SwarmHandle}; + use crate::{Coordinator, CoordinatorHandle}; /// `TorrentEntry` data is not directly accessible. It's only /// accessible through the trait methods. We need this temporary @@ -865,7 +865,7 @@ mod tests { number_of_peers: usize, } - async fn torrent_entry_info(swarm_handle: SwarmHandle) -> TorrentEntryInfo { + async fn torrent_entry_info(swarm_handle: CoordinatorHandle) -> TorrentEntryInfo { let torrent_guard = swarm_handle.lock().await; torrent_guard.clone().into() } diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index ead05a32d..e50a82933 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; -use torrust_tracker_swarm_coordination_registry::{Registry, SwarmHandle}; +use 
torrust_tracker_swarm_coordination_registry::{CoordinatorHandle, Registry}; /// In-memory repository for torrent entries. /// @@ -110,7 +110,7 @@ impl InMemoryTorrentRepository { /// /// An `Option` containing the torrent entry if found. #[must_use] - pub(crate) fn get(&self, key: &InfoHash) -> Option { + pub(crate) fn get(&self, key: &InfoHash) -> Option { self.swarms.get(key) } @@ -128,7 +128,7 @@ impl InMemoryTorrentRepository { /// /// A vector of `(InfoHash, TorrentEntry)` tuples. #[must_use] - pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, SwarmHandle)> { + pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, CoordinatorHandle)> { self.swarms.get_paginated(pagination) } From 00b9bf998269dec2e64b512fd07a0b4296985166 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 08:00:23 +0100 Subject: [PATCH 113/247] chore(deps): update dependencies ```output cargo update Updating crates.io index Locking 33 packages to latest compatible versions Updating anstyle-wincon v3.0.7 -> v3.0.8 Updating async-io v2.4.0 -> v2.4.1 Updating cc v1.2.22 -> v1.2.25 Updating clap v4.5.38 -> v4.5.39 Updating clap_builder v4.5.38 -> v4.5.39 Updating core-foundation v0.10.0 -> v0.10.1 Adding criterion v0.6.0 Removing hermit-abi v0.4.0 Updating hyper-rustls v0.27.5 -> v0.27.6 Updating hyper-util v0.1.11 -> v0.1.13 Updating icu_properties v2.0.0 -> v2.0.1 Updating icu_properties_data v2.0.0 -> v2.0.1 Adding iri-string v0.7.8 Updating libloading v0.8.7 -> v0.8.8 Updating libsqlite3-sys v0.33.0 -> v0.34.0 Removing linux-raw-sys v0.4.15 Updating lock_api v0.4.12 -> v0.4.13 Updating mio v1.0.3 -> v1.0.4 Adding once_cell_polyfill v1.70.1 Updating openssl v0.10.72 -> v0.10.73 Updating openssl-sys v0.9.108 -> v0.9.109 Updating parking_lot v0.12.3 -> v0.12.4 Updating parking_lot_core v0.9.10 -> v0.9.11 Updating polling v3.7.4 -> v3.8.0 Updating r2d2_sqlite v0.28.0 -> v0.29.0 Updating reqwest v0.12.15 -> v0.12.18 
Updating rusqlite v0.35.0 -> v0.36.0 Removing rustix v0.38.44 Updating rustversion v1.0.20 -> v1.0.21 Updating socket2 v0.5.9 -> v0.5.10 Updating tokio v1.45.0 -> v1.45.1 Updating tower-http v0.6.4 -> v0.6.5 Updating uuid v1.16.0 -> v1.17.0 Updating windows-core v0.61.1 -> v0.61.2 Updating windows-result v0.3.3 -> v0.3.4 Updating windows-strings v0.4.1 -> v0.4.2 ``` --- Cargo.lock | 239 +++++++++++++++++++++++++++++------------------------ 1 file changed, 131 insertions(+), 108 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ecf178a59..35040f516 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -120,12 +120,12 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "3.0.7" +version = "3.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +checksum = "6680de5231bd6ee4c6191b8a1325daa282b415391ec9d3a37bd34f2060dc73fa" dependencies = [ "anstyle", - "once_cell", + "once_cell_polyfill", "windows-sys 0.59.0", ] @@ -263,9 +263,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" +checksum = "1237c0ae75a0f3765f58910ff9cdd0a12eeb39ab2f4c7de23262f337f0aacbb3" dependencies = [ "async-lock", "cfg-if", @@ -274,7 +274,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.44", + "rustix", "slab", "tracing", "windows-sys 0.59.0", @@ -579,7 +579,7 @@ dependencies = [ "bittorrent-http-tracker-protocol", "bittorrent-primitives", "bittorrent-tracker-core", - "criterion", + "criterion 0.5.1", "formatjson", "futures", "mockall", @@ -697,7 +697,7 @@ dependencies = [ "bloom", "blowfish", "cipher", - "criterion", + "criterion 0.5.1", "futures", "lazy_static", "mockall", @@ -959,9 +959,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.22" +version = "1.2.25" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "32db95edf998450acc7881c932f94cd9b05c87b4b2599e8bab064753da4acfd1" +checksum = "d0fc897dc1e865cc67c0e05a836d9d3f1df3cbe442aa4a9473b18e12624a4951" dependencies = [ "jobserver", "libc", @@ -1052,9 +1052,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.38" +version = "4.5.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000" +checksum = "fd60e63e9be68e5fb56422e397cf9baddded06dae1d2e523401542383bc72a9f" dependencies = [ "clap_builder", "clap_derive", @@ -1062,9 +1062,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.38" +version = "4.5.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379026ff283facf611b0ea629334361c4211d1b12ee01024eec1591133b04120" +checksum = "89cc6392a1f72bbeb820d71f32108f61fdaf18bc526e1d23954168a67759ef51" dependencies = [ "anstream", "anstyle", @@ -1139,9 +1139,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" dependencies = [ "core-foundation-sys", "libc", @@ -1199,6 +1199,30 @@ dependencies = [ "walkdir", ] +[[package]] +name = "criterion" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bf7af66b0989381bd0be551bd7cc91912a655a58c6918420c9527b1fd8b4679" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "itertools 0.13.0", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_json", + "tinytemplate", + "tokio", + "walkdir", +] + [[package]] name = "criterion-plot" version = "0.5.0" @@ -1931,12 +1955,6 @@ version = "0.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" -[[package]] -name = "hermit-abi" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" - [[package]] name = "hermit-abi" version = "0.5.1" @@ -2048,11 +2066,10 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.5" +version = "0.27.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +checksum = "03a01595e11bdcec50946522c32dde3fc6914743000a68b93000965f2f02406d" dependencies = [ - "futures-util", "http", "hyper", "hyper-util", @@ -2081,22 +2098,28 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.11" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" +checksum = "b1c293b6b3d21eca78250dc7dbebd6b9210ec5530e038cbfe0661b5c47ab06e8" dependencies = [ + "base64 0.22.1", "bytes", "futures-channel", + "futures-core", "futures-util", "http", "http-body", "hyper", + "ipnet", "libc", + "percent-encoding", "pin-project-lite", "socket2", + "system-configuration", "tokio", "tower-service", "tracing", + "windows-registry", ] [[package]] @@ -2187,9 +2210,9 @@ checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2549ca8c7241c82f59c80ba2a6f415d931c5b58d24fb8412caa1a1f02c49139a" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ "displaydoc", "icu_collections", @@ -2203,9 +2226,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "2.0.0" +version = "2.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8197e866e47b68f8f7d95249e172903bec06004b18b2937f1095d40a0c57de04" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" @@ -2303,13 +2326,23 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "is-terminal" version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ - "hermit-abi 0.5.1", + "hermit-abi", "libc", "windows-sys 0.59.0", ] @@ -2393,9 +2426,9 @@ checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" [[package]] name = "libloading" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a793df0d7afeac54f95b471d3af7f0d4fb975699f972341a4b76988d49cdf0c" +checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" dependencies = [ "cfg-if", "windows-targets 0.53.0", @@ -2420,9 +2453,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "947e6816f7825b2b45027c2c32e7085da9934defa535de4a6a46b10a4d5257fa" +checksum = "91632f3b4fb6bd1d72aa3d78f41ffecfcf2b1a6648d8c241dbe7dbfaf4875e15" dependencies = [ "cc", "pkg-config", @@ -2440,12 +2473,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "linux-raw-sys" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" - [[package]] name = "linux-raw-sys" version = "0.9.4" @@ -2472,9 +2499,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -2563,13 +2590,13 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2815,6 +2842,12 @@ version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +[[package]] +name = "once_cell_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" + [[package]] name = "oorandom" version = "11.1.5" @@ -2823,9 +2856,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "openssl" -version = "0.10.72" +version = "0.10.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" +checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" dependencies = [ "bitflags 2.9.1", "cfg-if", @@ -2855,9 +2888,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.108" +version = 
"0.9.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e145e1651e858e820e4860f7b9c5e169bc1d8ce1c86043be79fa7b7634821847" +checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" dependencies = [ "cc", "libc", @@ -2885,9 +2918,9 @@ checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", @@ -2895,9 +2928,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", @@ -3067,15 +3100,15 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.4" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" +checksum = "b53a684391ad002dd6a596ceb6c74fd004fdce75f4be2e3f615068abbea5fd50" dependencies = [ "cfg-if", "concurrent-queue", - "hermit-abi 0.4.0", + "hermit-abi", "pin-project-lite", - "rustix 0.38.44", + "rustix", "tracing", "windows-sys 0.59.0", ] @@ -3277,9 +3310,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8998443b32daee2ad6f528afb19ad77c4a8acc4d8d55b3e5072ed42862fe261a" +checksum = "35006423374afbd4b270acddcbf1e28e60f6bdaaad10c2888b8fd2fba035213c" dependencies = [ "r2d2", "rusqlite", @@ -3435,15 +3468,14 @@ dependencies = [ 
[[package]] name = "reqwest" -version = "0.12.15" +version = "0.12.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" +checksum = "e98ff6b0dbbe4d5a37318f433d4fc82babd21631f194d370409ceb2e40b2f0b5" dependencies = [ "base64 0.22.1", "bytes", "encoding_rs", "futures-core", - "futures-util", "h2", "http", "http-body", @@ -3460,21 +3492,20 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", - "system-configuration", "tokio", "tokio-native-tls", "tower", + "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "windows-registry", ] [[package]] @@ -3563,9 +3594,9 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a22715a5d6deef63c637207afbe68d0c72c3f8d0022d7cf9714c442d6157606b" +checksum = "3de23c3319433716cf134eed225fe9986bc24f63bed9be9f20c329029e672dc7" dependencies = [ "bitflags 2.9.1", "fallible-iterator", @@ -3612,19 +3643,6 @@ dependencies = [ "semver", ] -[[package]] -name = "rustix" -version = "0.38.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" -dependencies = [ - "bitflags 2.9.1", - "errno", - "libc", - "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", -] - [[package]] name = "rustix" version = "1.0.7" @@ -3634,7 +3652,7 @@ dependencies = [ "bitflags 2.9.1", "errno", "libc", - "linux-raw-sys 0.9.4", + "linux-raw-sys", "windows-sys 0.59.0", ] @@ -3695,9 +3713,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" 
+checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" [[package]] name = "ryu" @@ -3770,7 +3788,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ "bitflags 2.9.1", - "core-foundation 0.10.0", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -4004,9 +4022,9 @@ checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" [[package]] name = "socket2" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", @@ -4185,7 +4203,7 @@ dependencies = [ "fastrand", "getrandom 0.3.3", "once_cell", - "rustix 1.0.7", + "rustix", "windows-sys 0.59.0", ] @@ -4204,7 +4222,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" dependencies = [ - "rustix 1.0.7", + "rustix", "windows-sys 0.59.0", ] @@ -4371,9 +4389,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.45.0" +version = "1.45.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" +checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779" dependencies = [ "backtrace", "bytes", @@ -4762,7 +4780,7 @@ dependencies = [ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ - "criterion", + "criterion 0.6.0", "thiserror 2.0.12", ] @@ -4826,11 +4844,11 @@ dependencies = [ "async-std", "bittorrent-primitives", "chrono", - "criterion", + "criterion 0.6.0", 
"crossbeam-skiplist", "futures", "mockall", - "rand 0.8.5", + "rand 0.9.1", "rstest", "serde", "thiserror 2.0.12", @@ -4861,7 +4879,7 @@ dependencies = [ "aquatic_udp_protocol", "async-std", "bittorrent-primitives", - "criterion", + "criterion 0.6.0", "crossbeam-skiplist", "dashmap", "futures", @@ -4926,19 +4944,22 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdb0c213ca27a9f57ab69ddb290fd80d970922355b83ae380b395d3986b8a2e" +checksum = "5cc2d9e086a412a451384326f521c8123a99a466b329941a9403696bff9b0da2" dependencies = [ "async-compression", "bitflags 2.9.1", "bytes", "futures-core", + "futures-util", "http", "http-body", + "iri-string", "pin-project-lite", "tokio", "tokio-util", + "tower", "tower-layer", "tower-service", "tracing", @@ -5122,12 +5143,14 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" +checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" dependencies = [ "getrandom 0.3.3", + "js-sys", "rand 0.9.1", + "wasm-bindgen", ] [[package]] @@ -5302,15 +5325,15 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.61.1" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46ec44dc15085cea82cf9c78f85a9114c463a369786585ad2882d1ff0b0acf40" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement", "windows-interface", "windows-link", "windows-result", - "windows-strings 0.4.1", + "windows-strings 0.4.2", ] [[package]] @@ -5354,9 +5377,9 @@ dependencies = [ [[package]] name = "windows-result" -version = 
"0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b895b5356fc36103d0f64dd1e94dfa7ac5633f1c9dd6e80fe9ec4adef69e09d" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ "windows-link", ] @@ -5372,9 +5395,9 @@ dependencies = [ [[package]] name = "windows-strings" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a7ab927b2637c19b3dbe0965e75d8f2d30bdd697a1516191cad2ec4df8fb28a" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ "windows-link", ] @@ -5565,7 +5588,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d65cbf2f12c15564212d48f4e3dfb87923d25d611f2aed18f4cb23f0413d89e" dependencies = [ "libc", - "rustix 1.0.7", + "rustix", ] [[package]] From caa03cc88a912d8ef2c8041aba5b3eb2ddf6ed95 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 10:47:51 +0100 Subject: [PATCH 114/247] fix: deprecated function criterion::black_box --- contrib/bencode/benches/bencode_benchmark.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/contrib/bencode/benches/bencode_benchmark.rs b/contrib/bencode/benches/bencode_benchmark.rs index b79bb0999..b22b286a5 100644 --- a/contrib/bencode/benches/bencode_benchmark.rs +++ b/contrib/bencode/benches/bencode_benchmark.rs @@ -1,4 +1,6 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use std::hint::black_box; + +use criterion::{criterion_group, criterion_main, Criterion}; use torrust_tracker_contrib_bencode::{BDecodeOpt, BencodeRef}; const B_NESTED_LISTS: &[u8; 100] = From 9c3c9109f575b221749f6baff0c9909197d46650 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 10:50:19 +0100 Subject: [PATCH 115/247] chore: add GitHhub MCP server config --- .vscode/mcp.json | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 
100644 .vscode/mcp.json diff --git a/.vscode/mcp.json b/.vscode/mcp.json new file mode 100644 index 000000000..506a52259 --- /dev/null +++ b/.vscode/mcp.json @@ -0,0 +1,26 @@ +{ + "inputs": [ + { + "type": "promptString", + "id": "github_token", + "description": "GitHub Personal Access Token", + "password": true + } + ], + "servers": { + "github": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "ghcr.io/github/github-mcp-server" + ], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${input:github_token}" + } + } + } +} \ No newline at end of file From db1c9b066d3bb5e0458f7d03dbdf6c2a6b251303 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 11:53:06 +0100 Subject: [PATCH 116/247] fix: test after updating dependencies --- .../tests/server/contract.rs | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/packages/axum-health-check-api-server/tests/server/contract.rs b/packages/axum-health-check-api-server/tests/server/contract.rs index 0e0d26b83..1d1ba3539 100644 --- a/packages/axum-health-check-api-server/tests/server/contract.rs +++ b/packages/axum-health-check-api-server/tests/server/contract.rs @@ -119,11 +119,8 @@ mod api { assert_eq!(details.binding, binding); assert!( - details - .result - .as_ref() - .is_err_and(|e| e.contains("error sending request for url")), - "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", + details.result.as_ref().is_err_and(|e| e.contains("error sending request")), + "Expected to contain, \"error sending request\", but have message \"{:?}\".", details.result ); assert_eq!( @@ -226,11 +223,8 @@ mod http { assert_eq!(details.binding, binding); assert!( - details - .result - .as_ref() - .is_err_and(|e| e.contains("error sending request for url")), - "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", + details.result.as_ref().is_err_and(|e| e.contains("error sending 
request")), + "Expected to contain, \"error sending request\", but have message \"{:?}\".", details.result ); assert_eq!( From 52b9660eeb6172fc4a03285751d9fe201eaca7a4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 13:49:09 +0100 Subject: [PATCH 117/247] feat: [#1456] wrapper over aquatic RequestParseError to make it sendable The error will be included in the UdpError event ans sent via tokio channel. --- packages/udp-tracker-server/src/error.rs | 34 +++++++++++++++++-- .../udp-tracker-server/src/handlers/error.rs | 20 +++-------- 2 files changed, 35 insertions(+), 19 deletions(-) diff --git a/packages/udp-tracker-server/src/error.rs b/packages/udp-tracker-server/src/error.rs index 93caf6853..6a63a4c9a 100644 --- a/packages/udp-tracker-server/src/error.rs +++ b/packages/udp-tracker-server/src/error.rs @@ -1,7 +1,7 @@ //! Error types for the UDP server. use std::panic::Location; -use aquatic_udp_protocol::{ConnectionId, RequestParseError}; +use aquatic_udp_protocol::{ConnectionId, RequestParseError, TransactionId}; use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; use bittorrent_udp_tracker_core::services::scrape::UdpScrapeError; use derive_more::derive::Display; @@ -17,7 +17,7 @@ pub struct ConnectionCookie(pub ConnectionId); pub enum Error { /// Error returned when the request is invalid. #[error("error when phrasing request: {request_parse_error:?}")] - RequestParseError { request_parse_error: RequestParseError }, + RequestParseError { request_parse_error: SendableRequestParseError }, /// Error returned when the domain tracker returns an announce error. 
#[error("tracker announce error: {source}")] @@ -47,7 +47,9 @@ pub enum Error { impl From for Error { fn from(request_parse_error: RequestParseError) -> Self { - Self::RequestParseError { request_parse_error } + Self::RequestParseError { + request_parse_error: request_parse_error.into(), + } } } @@ -66,3 +68,29 @@ impl From for Error { } } } + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct SendableRequestParseError { + pub message: String, + pub opt_connection_id: Option, + pub opt_transaction_id: Option, +} + +impl From for SendableRequestParseError { + fn from(request_parse_error: RequestParseError) -> Self { + let (message, opt_connection_id, opt_transaction_id) = match request_parse_error { + RequestParseError::Sendable { + connection_id, + transaction_id, + err, + } => ((*err).to_string(), Some(connection_id), Some(transaction_id)), + RequestParseError::Unsendable { err } => (err.to_string(), None, None), + }; + + Self { + message, + opt_connection_id, + opt_transaction_id, + } + } +} diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index 6259e26ca..7b477d84f 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -2,8 +2,7 @@ use std::net::SocketAddr; use std::ops::Range; -use aquatic_udp_protocol::{ErrorResponse, RequestParseError, Response, TransactionId}; -use bittorrent_udp_tracker_core::connection_cookie::{check, gen_remote_fingerprint}; +use aquatic_udp_protocol::{ErrorResponse, Response, TransactionId}; use bittorrent_udp_tracker_core::{self, UDP_TRACKER_LOG_TARGET}; use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::{instrument, Level}; @@ -40,25 +39,14 @@ pub async fn handle_error( } let e = if let Error::RequestParseError { request_parse_error } = e { - match request_parse_error { - RequestParseError::Sendable { - connection_id, - transaction_id, - err, - } => { - if let Err(e) = 
check(connection_id, gen_remote_fingerprint(&client_socket_addr), cookie_valid_range) { - (e.to_string(), Some(*transaction_id)) - } else { - ((*err).to_string(), Some(*transaction_id)) - } - } - RequestParseError::Unsendable { err } => (err.to_string(), transaction_id), - } + (request_parse_error.message.clone(), transaction_id) } else { (e.to_string(), transaction_id) }; if e.1.is_some() { + // code-review: why we trigger an event only if transaction_id is present? + if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender .send(Event::UdpError { From 8f3c22aaa3bbdb643545af72c48e27499f3a283c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 16:29:27 +0100 Subject: [PATCH 118/247] feat: [#1456] expose error kind in the UdpError event Not exposing the original complex error type because: - It's too complex. - It forces all errors to be "Sent", "PartialEq". - It would expose a lot of internals. --- packages/tracker-core/src/error.rs | 2 +- .../udp-tracker-core/src/connection_cookie.rs | 2 +- packages/udp-tracker-server/src/error.rs | 13 +++++- packages/udp-tracker-server/src/event.rs | 45 ++++++++++++++++++- .../udp-tracker-server/src/handlers/error.rs | 11 ++--- .../src/statistics/event/handler.rs | 6 ++- 6 files changed, 68 insertions(+), 11 deletions(-) diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index 4a35e9a0b..866aa64c5 100644 --- a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -84,7 +84,7 @@ pub enum ScrapeError { /// /// This error is returned when an operation involves a torrent that is not /// present in the whitelist. -#[derive(thiserror::Error, Debug, Clone)] +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] pub enum WhitelistError { /// Indicates that the torrent identified by `info_hash` is not whitelisted. 
#[error("The torrent: {info_hash}, is not whitelisted, {location}")] diff --git a/packages/udp-tracker-core/src/connection_cookie.rs b/packages/udp-tracker-core/src/connection_cookie.rs index 31c116400..ce255705f 100644 --- a/packages/udp-tracker-core/src/connection_cookie.rs +++ b/packages/udp-tracker-core/src/connection_cookie.rs @@ -86,7 +86,7 @@ use zerocopy::AsBytes; use crate::crypto::keys::CipherArrayBlowfish; /// Error returned when there was an error with the connection cookie. -#[derive(Error, Debug, Clone)] +#[derive(Error, Debug, Clone, PartialEq)] pub enum ConnectionCookieError { #[error("cookie value is not normal: {not_normal_value}")] ValueNotNormal { not_normal_value: f64 }, diff --git a/packages/udp-tracker-server/src/error.rs b/packages/udp-tracker-server/src/error.rs index 6a63a4c9a..d45b96569 100644 --- a/packages/udp-tracker-server/src/error.rs +++ b/packages/udp-tracker-server/src/error.rs @@ -1,4 +1,5 @@ //! Error types for the UDP server. +use std::fmt::Display; use std::panic::Location; use aquatic_udp_protocol::{ConnectionId, RequestParseError, TransactionId}; @@ -13,7 +14,7 @@ use torrust_tracker_located_error::LocatedError; pub struct ConnectionCookie(pub ConnectionId); /// Error returned by the UDP server. -#[derive(Error, Debug)] +#[derive(Error, Debug, Clone)] pub enum Error { /// Error returned when the request is invalid. 
#[error("error when phrasing request: {request_parse_error:?}")] @@ -76,6 +77,16 @@ pub struct SendableRequestParseError { pub opt_transaction_id: Option, } +impl Display for SendableRequestParseError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "SendableRequestParseError: message: {}, connection_id: {:?}, transaction_id: {:?}", + self.message, self.opt_connection_id, self.opt_transaction_id + ) + } +} + impl From for SendableRequestParseError { fn from(request_parse_error: RequestParseError) -> Self { let (message, opt_connection_id, opt_transaction_id) = match request_parse_error { diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs index 8aabd7ffb..4d3646563 100644 --- a/packages/udp-tracker-server/src/event.rs +++ b/packages/udp-tracker-server/src/event.rs @@ -2,12 +2,17 @@ use std::fmt; use std::net::SocketAddr; use std::time::Duration; +use bittorrent_tracker_core::error::{AnnounceError, ScrapeError}; +use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; +use bittorrent_udp_tracker_core::services::scrape::UdpScrapeError; use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::label_name; use torrust_tracker_primitives::service_binding::ServiceBinding; +use crate::error::Error; + /// A UDP server event. 
-#[derive(Debug, PartialEq, Eq, Clone)] +#[derive(Debug, Clone, PartialEq)] pub enum Event { UdpRequestReceived { context: ConnectionContext, @@ -30,6 +35,7 @@ pub enum Event { UdpError { context: ConnectionContext, kind: Option, + error: ErrorKind, }, } @@ -109,6 +115,43 @@ impl From for LabelSet { } } +#[derive(Debug, Clone, PartialEq)] +pub enum ErrorKind { + RequestParse(String), + ConnectionCookie(String), + Whitelist(String), + Database(String), + InternalServer(String), + BadRequest(String), + TrackerAuthentication(String), +} + +impl From for ErrorKind { + fn from(error: Error) -> Self { + match error { + Error::RequestParseError { request_parse_error } => Self::RequestParse(request_parse_error.to_string()), + Error::UdpAnnounceError { source } => match source { + UdpAnnounceError::ConnectionCookieError { source } => Self::ConnectionCookie(source.to_string()), + UdpAnnounceError::TrackerCoreAnnounceError { source } => match source { + AnnounceError::Whitelist(whitelist_error) => Self::Whitelist(whitelist_error.to_string()), + AnnounceError::Database(error) => Self::Database(error.to_string()), + }, + UdpAnnounceError::TrackerCoreWhitelistError { source } => Self::Whitelist(source.to_string()), + }, + Error::UdpScrapeError { source } => match source { + UdpScrapeError::ConnectionCookieError { source } => Self::ConnectionCookie(source.to_string()), + UdpScrapeError::TrackerCoreScrapeError { source } => match source { + ScrapeError::Whitelist(whitelist_error) => Self::Whitelist(whitelist_error.to_string()), + }, + UdpScrapeError::TrackerCoreWhitelistError { source } => Self::Whitelist(source.to_string()), + }, + Error::InternalServer { location: _, message } => Self::InternalServer(message.to_string()), + Error::BadRequest { source } => Self::BadRequest(source.to_string()), + Error::TrackerAuthenticationRequired { location } => Self::TrackerAuthentication(location.to_string()), + } + } +} + pub mod sender { use std::sync::Arc; diff --git 
a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index 7b477d84f..54163aca5 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -21,7 +21,7 @@ pub async fn handle_error( request_id: Uuid, opt_udp_server_stats_event_sender: &crate::event::sender::Sender, cookie_valid_range: Range, - e: &Error, + error: &Error, transaction_id: Option, ) -> Response { tracing::trace!("handle error"); @@ -31,17 +31,17 @@ pub async fn handle_error( match transaction_id { Some(transaction_id) => { let transaction_id = transaction_id.0.to_string(); - tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %client_socket_addr, %server_socket_addr, %request_id, %transaction_id, "response error"); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %error, %client_socket_addr, %server_socket_addr, %request_id, %transaction_id, "response error"); } None => { - tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %client_socket_addr, %server_socket_addr, %request_id, "response error"); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %error, %client_socket_addr, %server_socket_addr, %request_id, "response error"); } } - let e = if let Error::RequestParseError { request_parse_error } = e { + let e = if let Error::RequestParseError { request_parse_error } = error { (request_parse_error.message.clone(), transaction_id) } else { - (e.to_string(), transaction_id) + (error.to_string(), transaction_id) }; if e.1.is_some() { @@ -52,6 +52,7 @@ pub async fn handle_error( .send(Event::UdpError { context: ConnectionContext::new(client_socket_addr, server_service_binding), kind: req_kind, + error: error.clone().into(), }) .await; } diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 1e1502339..b231d8336 100644 --- 
a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -232,7 +232,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura Err(err) => tracing::error!("Failed to increase the counter: {}", err), }; } - Event::UdpError { context, kind } => { + Event::UdpError { context, kind, error: _ } => { // Global fixed metrics match context.client_socket_addr().ip() { std::net::IpAddr::V4(_) => { @@ -271,7 +271,7 @@ mod tests { use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; - use crate::event::{ConnectionContext, Event, UdpRequestKind}; + use crate::event::{ConnectionContext, ErrorKind, Event, UdpRequestKind}; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; use crate::CurrentClock; @@ -518,6 +518,7 @@ mod tests { .unwrap(), ), kind: None, + error: ErrorKind::RequestParse("Invalid request format".to_string()), }, &stats_repository, CurrentClock::now(), @@ -650,6 +651,7 @@ mod tests { .unwrap(), ), kind: None, + error: ErrorKind::RequestParse("Invalid request format".to_string()), }, &stats_repository, CurrentClock::now(), From d7902f1d670bf4411303fa3934e0a4ce595a20ef Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 16:36:50 +0100 Subject: [PATCH 119/247] refactor: [#1456] remove unused enum variant in udp server error --- Cargo.lock | 1 - packages/udp-tracker-server/Cargo.toml | 1 - packages/udp-tracker-server/src/error.rs | 7 ------- packages/udp-tracker-server/src/event.rs | 1 - packages/udp-tracker-server/src/handlers/mod.rs | 1 + 5 files changed, 1 insertion(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 35040f516..feb749d3f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4915,7 +4915,6 @@ dependencies = [ "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-events", - "torrust-tracker-located-error", 
"torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-swarm-coordination-registry", diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index 72fa520ba..c0bc94ce3 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -30,7 +30,6 @@ torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } -torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } diff --git a/packages/udp-tracker-server/src/error.rs b/packages/udp-tracker-server/src/error.rs index d45b96569..aecf960b8 100644 --- a/packages/udp-tracker-server/src/error.rs +++ b/packages/udp-tracker-server/src/error.rs @@ -7,7 +7,6 @@ use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; use bittorrent_udp_tracker_core::services::scrape::UdpScrapeError; use derive_more::derive::Display; use thiserror::Error; -use torrust_tracker_located_error::LocatedError; #[derive(Display, Debug)] #[display(":?")] @@ -35,12 +34,6 @@ pub enum Error { message: String, }, - /// Error returned when the request is invalid. - #[error("bad request: {source}")] - BadRequest { - source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - }, - /// Error returned when tracker requires authentication. #[error("domain tracker requires authentication but is not supported in current UDP implementation. 
Location: {location}")] TrackerAuthenticationRequired { location: &'static Location<'static> }, diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs index 4d3646563..e320ceb8a 100644 --- a/packages/udp-tracker-server/src/event.rs +++ b/packages/udp-tracker-server/src/event.rs @@ -146,7 +146,6 @@ impl From for ErrorKind { UdpScrapeError::TrackerCoreWhitelistError { source } => Self::Whitelist(source.to_string()), }, Error::InternalServer { location: _, message } => Self::InternalServer(message.to_string()), - Error::BadRequest { source } => Self::BadRequest(source.to_string()), Error::TrackerAuthenticationRequired { location } => Self::TrackerAuthentication(location.to_string()), } } diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index df550ab72..6785bd293 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -109,6 +109,7 @@ pub(crate) async fn handle_packet( } }, Err(e) => { + // The request payload could not be parsed, so we handle it as an error. let response = handle_error( None, udp_request.from, From 0108c26b6db35d11522589cb20ce62904a97c059 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 16:54:40 +0100 Subject: [PATCH 120/247] fix: test. Error message changed --- packages/udp-tracker-server/src/error.rs | 2 +- packages/udp-tracker-server/tests/server/contract.rs | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/packages/udp-tracker-server/src/error.rs b/packages/udp-tracker-server/src/error.rs index aecf960b8..697cc5cab 100644 --- a/packages/udp-tracker-server/src/error.rs +++ b/packages/udp-tracker-server/src/error.rs @@ -16,7 +16,7 @@ pub struct ConnectionCookie(pub ConnectionId); #[derive(Error, Debug, Clone)] pub enum Error { /// Error returned when the request is invalid. 
- #[error("error when phrasing request: {request_parse_error:?}")] + #[error("error parsing request: {request_parse_error:?}")] RequestParseError { request_parse_error: SendableRequestParseError }, /// Error returned when the domain tracker returns an announce error. diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index 860fd1f0b..04ad0f39d 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -59,7 +59,9 @@ async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_req let response = Response::parse_bytes(&response, true).unwrap(); - assert_eq!(get_error_response_message(&response).unwrap(), "Protocol identifier missing"); + assert!(get_error_response_message(&response) .unwrap() .contains("Protocol identifier missing")); env.stop().await; } From f485501f8e7705fe886932d5889b79c8eafb9057 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 16:55:24 +0100 Subject: [PATCH 121/247] refactor: [#1456] clean code --- .../udp-tracker-server/src/handlers/error.rs | 16 +++++----------- packages/udp-tracker-server/src/handlers/mod.rs | 9 ++++++++- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index 54163aca5..4ebe24075 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -22,13 +22,13 @@ pub async fn handle_error( opt_udp_server_stats_event_sender: &crate::event::sender::Sender, cookie_valid_range: Range, error: &Error, - transaction_id: Option, + opt_transaction_id: Option, ) -> Response { tracing::trace!("handle error"); let server_socket_addr = server_service_binding.bind_address(); - match transaction_id { + match opt_transaction_id { Some(transaction_id) => { let transaction_id = 
transaction_id.0.to_string(); tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %error, %client_socket_addr, %server_socket_addr, %request_id, %transaction_id, "response error"); @@ -38,13 +38,7 @@ pub async fn handle_error( } } - let e = if let Error::RequestParseError { request_parse_error } = error { - (request_parse_error.message.clone(), transaction_id) - } else { - (error.to_string(), transaction_id) - }; - - if e.1.is_some() { + if opt_transaction_id.is_some() { // code-review: why we trigger an event only if transaction_id is present? if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { @@ -59,7 +53,7 @@ pub async fn handle_error( } Response::from(ErrorResponse { - transaction_id: e.1.unwrap_or(TransactionId(I32::new(0))), - message: e.0.into(), + transaction_id: opt_transaction_id.unwrap_or(TransactionId(I32::new(0))), + message: error.to_string().into(), }) } diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 6785bd293..69c62a638 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -110,6 +110,13 @@ pub(crate) async fn handle_packet( }, Err(e) => { // The request payload could not be parsed, so we handle it as an error. 
+ + let opt_transaction_id = if let Error::RequestParseError { request_parse_error } = e.clone() { + request_parse_error.opt_transaction_id + } else { + None + }; + let response = handle_error( None, udp_request.from, @@ -118,7 +125,7 @@ pub(crate) async fn handle_packet( &udp_tracker_server_container.stats_event_sender, cookie_time_values.valid_range.clone(), &e, - None, + opt_transaction_id, ) .await; From 525ab738d485a15175a8924520d88f66515f927a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 17:04:25 +0100 Subject: [PATCH 122/247] refactor: [#1456] extract methods --- .../udp-tracker-server/src/handlers/error.rs | 41 ++++++++++++++++--- 1 file changed, 36 insertions(+), 5 deletions(-) diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index 4ebe24075..af530efd6 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -28,6 +28,32 @@ pub async fn handle_error( let server_socket_addr = server_service_binding.bind_address(); + log_error(error, client_socket_addr, server_socket_addr, opt_transaction_id, request_id); + + trigger_udp_error_event( + error.clone(), + client_socket_addr, + server_service_binding, + opt_transaction_id, + opt_udp_server_stats_event_sender, + req_kind, + ) + .await; + + Response::from(ErrorResponse { + transaction_id: opt_transaction_id.unwrap_or(TransactionId(I32::new(0))), + message: error.to_string().into(), + }) +} + +fn log_error( + error: &Error, + client_socket_addr: SocketAddr, + server_socket_addr: SocketAddr, + opt_transaction_id: Option, + + request_id: Uuid, +) { match opt_transaction_id { Some(transaction_id) => { let transaction_id = transaction_id.0.to_string(); @@ -37,7 +63,17 @@ pub async fn handle_error( tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %error, %client_socket_addr, %server_socket_addr, %request_id, "response error"); } } +} + +async fn 
trigger_udp_error_event( + error: Error, + client_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, + opt_transaction_id: Option, + opt_udp_server_stats_event_sender: &crate::event::sender::Sender, + req_kind: Option, +) { if opt_transaction_id.is_some() { // code-review: why we trigger an event only if transaction_id is present? @@ -51,9 +87,4 @@ pub async fn handle_error( .await; } } - - Response::from(ErrorResponse { - transaction_id: opt_transaction_id.unwrap_or(TransactionId(I32::new(0))), - message: error.to_string().into(), - }) } From ad1b19a366573dd24f35c3d6250758ee082ba9f6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 17:09:45 +0100 Subject: [PATCH 123/247] feat: trigger UDP error event when there is no transaction ID too --- .../udp-tracker-server/src/handlers/error.rs | 28 +++++++------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index af530efd6..7fb4141b2 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -31,10 +31,9 @@ pub async fn handle_error( log_error(error, client_socket_addr, server_socket_addr, opt_transaction_id, request_id); trigger_udp_error_event( - error.clone(), + error, client_socket_addr, server_service_binding, - opt_transaction_id, opt_udp_server_stats_event_sender, req_kind, ) @@ -51,7 +50,6 @@ fn log_error( client_socket_addr: SocketAddr, server_socket_addr: SocketAddr, opt_transaction_id: Option, - request_id: Uuid, ) { match opt_transaction_id { @@ -66,25 +64,19 @@ fn log_error( } async fn trigger_udp_error_event( - error: Error, + error: &Error, client_socket_addr: SocketAddr, server_service_binding: ServiceBinding, - opt_transaction_id: Option, - opt_udp_server_stats_event_sender: &crate::event::sender::Sender, req_kind: Option, ) { - if opt_transaction_id.is_some() { - // code-review: why we 
trigger an event only if transaction_id is present? - - if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { - udp_server_stats_event_sender - .send(Event::UdpError { - context: ConnectionContext::new(client_socket_addr, server_service_binding), - kind: req_kind, - error: error.clone().into(), - }) - .await; - } + if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { + udp_server_stats_event_sender + .send(Event::UdpError { + context: ConnectionContext::new(client_socket_addr, server_service_binding), + kind: req_kind, + error: error.clone().into(), + }) + .await; } } From 21bea5b4bf30f3c220b443fed839521df50f453c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 17:35:17 +0100 Subject: [PATCH 124/247] refactor: [#1456] increase ban counters asynchronously --- .../udp-tracker-server/src/environment.rs | 1 + .../udp-tracker-server/src/handlers/mod.rs | 10 ---- .../src/statistics/event/handler.rs | 54 +++++++++++++++++-- .../src/statistics/event/listener.rs | 17 ++++-- src/bootstrap/jobs/udp_tracker_server.rs | 1 + 5 files changed, 65 insertions(+), 18 deletions(-) diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 3f479a02d..268259f1b 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -82,6 +82,7 @@ impl Environment { let udp_server_event_listener_job = Some(crate::statistics::event::listener::run_event_listener( self.container.udp_tracker_server_container.event_bus.receiver(), &self.container.udp_tracker_server_container.stats_repository, + &self.container.udp_tracker_core_container.ban_service, )); // Start the UDP tracker server diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 69c62a638..0bd455701 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ 
b/packages/udp-tracker-server/src/handlers/mod.rs @@ -13,7 +13,6 @@ use announce::handle_announce; use aquatic_udp_protocol::{Request, Response, TransactionId}; use bittorrent_tracker_core::MAX_SCRAPE_TORRENTS; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; -use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; use connect::handle_connect; use error::handle_error; use scrape::handle_scrape; @@ -84,15 +83,6 @@ pub(crate) async fn handle_packet( { Ok((response, req_kid)) => return (response, Some(req_kid)), Err((error, transaction_id, req_kind)) => { - if let Error::UdpAnnounceError { - source: UdpAnnounceError::ConnectionCookieError { .. }, - } = error - { - // code-review: should we include `RequestParseError` and `BadRequest`? - let mut ban_service = udp_tracker_core_container.ban_service.write().await; - ban_service.increase_counter(&udp_request.from.ip()); - } - let response = handle_error( Some(req_kind.clone()), udp_request.from, diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index b231d8336..394850844 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -1,8 +1,12 @@ +use std::sync::Arc; + +use bittorrent_udp_tracker_core::services::banning::BanService; +use tokio::sync::RwLock; use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::{label_name, metric_name}; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use crate::event::{Event, UdpRequestKind, UdpResponseKind}; +use crate::event::{ErrorKind, Event, UdpRequestKind, UdpResponseKind}; use crate::statistics::repository::Repository; use crate::statistics::{ UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, @@ -16,7 +20,12 @@ use crate::statistics::{ /// This function panics if the client IP version does not match the 
expected /// version. #[allow(clippy::too_many_lines)] -pub async fn handle_event(event: Event, stats_repository: &Repository, now: DurationSinceUnixEpoch) { +pub async fn handle_event( + event: Event, + stats_repository: &Repository, + ban_service: &Arc>, + now: DurationSinceUnixEpoch, +) { match event { Event::UdpRequestAborted { context } => { // Global fixed metrics @@ -232,7 +241,14 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura Err(err) => tracing::error!("Failed to increase the counter: {}", err), }; } - Event::UdpError { context, kind, error: _ } => { + Event::UdpError { context, kind, error } => { + // Increase the number of errors + // code-review: should we ban IP due to other errors too? + if let ErrorKind::ConnectionCookie(_msg) = error { + let mut ban_service = ban_service.write().await; + ban_service.increase_counter(&context.client_socket_addr().ip()); + } + // Global fixed metrics match context.client_socket_addr().ip() { std::net::IpAddr::V4(_) => { @@ -267,7 +283,9 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + use bittorrent_udp_tracker_core::services::banning::BanService; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -279,6 +297,7 @@ mod tests { #[tokio::test] async fn should_increase_the_number_of_aborted_requests_when_it_receives_a_udp_request_aborted_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAborted { @@ -292,6 +311,7 @@ mod tests { ), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -304,6 +324,7 @@ mod tests { #[tokio::test] async fn should_increase_the_number_of_banned_requests_when_it_receives_a_udp_request_banned_event() { let stats_repository = 
Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestBanned { @@ -317,6 +338,7 @@ mod tests { ), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -329,6 +351,7 @@ mod tests { #[tokio::test] async fn should_increase_the_number_of_incoming_requests_when_it_receives_a_udp4_incoming_request_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestReceived { @@ -342,6 +365,7 @@ mod tests { ), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -354,6 +378,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAborted { @@ -367,6 +392,7 @@ mod tests { ), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -376,6 +402,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestBanned { @@ -389,6 +416,7 @@ mod tests { ), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -399,6 +427,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_connect_requests_counter_when_it_receives_a_udp4_request_event_of_connect_kind() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -413,6 +442,7 @@ mod tests { kind: crate::event::UdpRequestKind::Connect, }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -425,6 +455,7 @@ mod tests { #[tokio::test] async fn 
should_increase_the_udp4_announce_requests_counter_when_it_receives_a_udp4_request_event_of_announce_kind() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -439,6 +470,7 @@ mod tests { kind: crate::event::UdpRequestKind::Announce, }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -451,6 +483,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_scrape_requests_counter_when_it_receives_a_udp4_request_event_of_scrape_kind() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -465,6 +498,7 @@ mod tests { kind: crate::event::UdpRequestKind::Scrape, }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -477,6 +511,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_responses_counter_when_it_receives_a_udp4_response_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpResponseSent { @@ -494,6 +529,7 @@ mod tests { req_processing_time: std::time::Duration::from_secs(1), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -506,6 +542,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_errors_counter_when_it_receives_a_udp4_error_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpError { @@ -521,6 +558,7 @@ mod tests { error: ErrorKind::RequestParse("Invalid request format".to_string()), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -533,6 +571,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_connect_requests_counter_when_it_receives_a_udp6_request_event_of_connect_kind() { let stats_repository = 
Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -547,6 +586,7 @@ mod tests { kind: crate::event::UdpRequestKind::Connect, }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -559,6 +599,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_announce_requests_counter_when_it_receives_a_udp6_request_event_of_announce_kind() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -573,6 +614,7 @@ mod tests { kind: crate::event::UdpRequestKind::Announce, }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -585,6 +627,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_scrape_requests_counter_when_it_receives_a_udp6_request_event_of_scrape_kind() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -599,6 +642,7 @@ mod tests { kind: crate::event::UdpRequestKind::Scrape, }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -611,6 +655,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_response_counter_when_it_receives_a_udp6_response_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpResponseSent { @@ -628,6 +673,7 @@ mod tests { req_processing_time: std::time::Duration::from_secs(1), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -639,6 +685,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_errors_counter_when_it_receives_a_udp6_error_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpError { @@ -654,6 +701,7 @@ mod tests { 
error: ErrorKind::RequestParse("Invalid request format".to_string()), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index d805cc87f..e6c9a85ce 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -1,6 +1,8 @@ use std::sync::Arc; +use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; +use tokio::sync::RwLock; use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; @@ -11,19 +13,24 @@ use crate::statistics::repository::Repository; use crate::CurrentClock; #[must_use] -pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { - let stats_repository = repository.clone(); +pub fn run_event_listener( + receiver: Receiver, + repository: &Arc, + ban_service: &Arc>, +) -> JoinHandle<()> { + let repository_clone = repository.clone(); + let ban_service_clone = ban_service.clone(); tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener"); tokio::spawn(async move { - dispatch_events(receiver, stats_repository).await; + dispatch_events(receiver, repository_clone, ban_service_clone).await; tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { +async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc, ban_service: Arc>) { let shutdown_signal = tokio::signal::ctrl_c(); tokio::pin!(shutdown_signal); @@ -38,7 +45,7 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { match result { - Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, + Ok(event) => handle_event(event, 
&stats_repository, &ban_service, CurrentClock::now()).await, Err(e) => { match e { RecvError::Closed => { diff --git a/src/bootstrap/jobs/udp_tracker_server.rs b/src/bootstrap/jobs/udp_tracker_server.rs index 42ac2d03e..8a4c2a273 100644 --- a/src/bootstrap/jobs/udp_tracker_server.rs +++ b/src/bootstrap/jobs/udp_tracker_server.rs @@ -10,6 +10,7 @@ pub fn start_event_listener(config: &Configuration, app_container: &Arc Date: Mon, 2 Jun 2025 17:49:41 +0100 Subject: [PATCH 125/247] refactor: rename UDP tracker server error variants --- packages/udp-tracker-server/src/error.rs | 16 ++++++++-------- packages/udp-tracker-server/src/event.rs | 10 +++++----- packages/udp-tracker-server/src/handlers/mod.rs | 2 +- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/packages/udp-tracker-server/src/error.rs b/packages/udp-tracker-server/src/error.rs index 697cc5cab..d260ebfd4 100644 --- a/packages/udp-tracker-server/src/error.rs +++ b/packages/udp-tracker-server/src/error.rs @@ -17,31 +17,31 @@ pub struct ConnectionCookie(pub ConnectionId); pub enum Error { /// Error returned when the request is invalid. #[error("error parsing request: {request_parse_error:?}")] - RequestParseError { request_parse_error: SendableRequestParseError }, + InvalidRequest { request_parse_error: SendableRequestParseError }, /// Error returned when the domain tracker returns an announce error. #[error("tracker announce error: {source}")] - UdpAnnounceError { source: UdpAnnounceError }, + AnnounceFailed { source: UdpAnnounceError }, /// Error returned when the domain tracker returns an scrape error. #[error("tracker scrape error: {source}")] - UdpScrapeError { source: UdpScrapeError }, + ScrapeFailed { source: UdpScrapeError }, /// Error returned from a third-party library (`aquatic_udp_protocol`). 
#[error("internal server error: {message}, {location}")] - InternalServer { + Internal { location: &'static Location<'static>, message: String, }, /// Error returned when tracker requires authentication. #[error("domain tracker requires authentication but is not supported in current UDP implementation. Location: {location}")] - TrackerAuthenticationRequired { location: &'static Location<'static> }, + AuthRequired { location: &'static Location<'static> }, } impl From for Error { fn from(request_parse_error: RequestParseError) -> Self { - Self::RequestParseError { + Self::InvalidRequest { request_parse_error: request_parse_error.into(), } } @@ -49,7 +49,7 @@ impl From for Error { impl From for Error { fn from(udp_announce_error: UdpAnnounceError) -> Self { - Self::UdpAnnounceError { + Self::AnnounceFailed { source: udp_announce_error, } } @@ -57,7 +57,7 @@ impl From for Error { impl From for Error { fn from(udp_scrape_error: UdpScrapeError) -> Self { - Self::UdpScrapeError { + Self::ScrapeFailed { source: udp_scrape_error, } } diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs index e320ceb8a..4fa29940e 100644 --- a/packages/udp-tracker-server/src/event.rs +++ b/packages/udp-tracker-server/src/event.rs @@ -129,8 +129,8 @@ pub enum ErrorKind { impl From for ErrorKind { fn from(error: Error) -> Self { match error { - Error::RequestParseError { request_parse_error } => Self::RequestParse(request_parse_error.to_string()), - Error::UdpAnnounceError { source } => match source { + Error::InvalidRequest { request_parse_error } => Self::RequestParse(request_parse_error.to_string()), + Error::AnnounceFailed { source } => match source { UdpAnnounceError::ConnectionCookieError { source } => Self::ConnectionCookie(source.to_string()), UdpAnnounceError::TrackerCoreAnnounceError { source } => match source { AnnounceError::Whitelist(whitelist_error) => Self::Whitelist(whitelist_error.to_string()), @@ -138,15 +138,15 @@ impl From for 
ErrorKind { }, UdpAnnounceError::TrackerCoreWhitelistError { source } => Self::Whitelist(source.to_string()), }, - Error::UdpScrapeError { source } => match source { + Error::ScrapeFailed { source } => match source { UdpScrapeError::ConnectionCookieError { source } => Self::ConnectionCookie(source.to_string()), UdpScrapeError::TrackerCoreScrapeError { source } => match source { ScrapeError::Whitelist(whitelist_error) => Self::Whitelist(whitelist_error.to_string()), }, UdpScrapeError::TrackerCoreWhitelistError { source } => Self::Whitelist(source.to_string()), }, - Error::InternalServer { location: _, message } => Self::InternalServer(message.to_string()), - Error::TrackerAuthenticationRequired { location } => Self::TrackerAuthentication(location.to_string()), + Error::Internal { location: _, message } => Self::InternalServer(message.to_string()), + Error::AuthRequired { location } => Self::TrackerAuthentication(location.to_string()), } } } diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 0bd455701..c1125b97f 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -101,7 +101,7 @@ pub(crate) async fn handle_packet( Err(e) => { // The request payload could not be parsed, so we handle it as an error. 
- let opt_transaction_id = if let Error::RequestParseError { request_parse_error } = e.clone() { + let opt_transaction_id = if let Error::InvalidRequest { request_parse_error } = e.clone() { request_parse_error.opt_transaction_id } else { None From 89ac87cbc1c26fd93e6a019faeb10161f9f6e058 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 18:03:25 +0100 Subject: [PATCH 126/247] refactor: [#1551] extract methods in udp event handler --- .../src/statistics/event/handler.rs | 482 +++++++++--------- 1 file changed, 254 insertions(+), 228 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 394850844..a1e9007e9 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -6,7 +6,7 @@ use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::{label_name, metric_name}; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use crate::event::{ErrorKind, Event, UdpRequestKind, UdpResponseKind}; +use crate::event::{ConnectionContext, ErrorKind, Event, UdpRequestKind, UdpResponseKind}; use crate::statistics::repository::Repository; use crate::statistics::{ UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, @@ -15,10 +15,6 @@ use crate::statistics::{ UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, }; -/// # Panics -/// -/// This function panics if the client IP version does not match the expected -/// version. 
#[allow(clippy::too_many_lines)] pub async fn handle_event( event: Event, @@ -28,256 +24,286 @@ pub async fn handle_event( ) { match event { Event::UdpRequestAborted { context } => { - // Global fixed metrics - stats_repository.increase_udp_requests_aborted().await; - - // Extendable metrics - match stats_repository - .increase_counter( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), - &LabelSet::from(context), - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; + handle_udp_request_aborted_event(context, stats_repository, now).await; } Event::UdpRequestBanned { context } => { - // Global fixed metrics - stats_repository.increase_udp_requests_banned().await; - - // Extendable metrics - match stats_repository - .increase_counter( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), - &LabelSet::from(context), - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; + handle_udp_request_banned_event(context, stats_repository, now).await; } Event::UdpRequestReceived { context } => { - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_requests().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_requests().await; - } - } - - // Extendable metrics - match stats_repository - .increase_counter( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), - &LabelSet::from(context), - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; + handle_udp_request_received_event(context, stats_repository, now).await; } Event::UdpRequestAccepted { context, kind } => { - // Global fixed metrics - match kind { - UdpRequestKind::Connect => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_connections().await; - } - 
std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_connections().await; - } - }, - UdpRequestKind::Announce => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_announces().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_announces().await; - } - }, - UdpRequestKind::Scrape => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_scrapes().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_scrapes().await; - } - }, - } - - // Extendable metrics - - let mut label_set = LabelSet::from(context); - - label_set.upsert(label_name!("request_kind"), LabelValue::new(&kind.to_string())); - - match stats_repository - .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; + handle_udp_request_accepted_event(context, kind, stats_repository, now).await; } Event::UdpResponseSent { context, kind, req_processing_time, } => { - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_responses().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_responses().await; - } - } + handle_udp_response_sent_event(context, kind, req_processing_time, stats_repository, now).await; + } + Event::UdpError { context, kind, error } => { + handle_udp_error_event(context, kind, error, stats_repository, ban_service, now).await; + } + } - let (result_label_value, kind_label_value) = match kind { - UdpResponseKind::Ok { req_kind } => match req_kind { - UdpRequestKind::Connect => { - let new_avg = stats_repository - .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) - .await; - - // Extendable metrics - - let mut label_set = LabelSet::from(context.clone()); - 
label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Connect.to_string())) - } - UdpRequestKind::Announce => { - let new_avg = stats_repository - .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) - .await; - - // Extendable metrics - - let mut label_set = LabelSet::from(context.clone()); - label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Announce.to_string())) - } - UdpRequestKind::Scrape => { - let new_avg = stats_repository - .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) - .await; - - // Extendable metrics - - let mut label_set = LabelSet::from(context.clone()); - label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) - } - }, - UdpResponseKind::Error { opt_req_kind: _ } => (LabelValue::new("error"), LabelValue::ignore()), - }; + tracing::debug!("stats: {:?}", stats_repository.get_stats().await); +} - // Extendable metrics +async fn handle_udp_request_aborted_event( + context: ConnectionContext, + 
stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + // Global fixed metrics + stats_repository.increase_udp_requests_aborted().await; + + // Extendable metrics + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} - let mut label_set = LabelSet::from(context); +async fn handle_udp_request_banned_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { + // Global fixed metrics + stats_repository.increase_udp_requests_banned().await; - if result_label_value == LabelValue::new("ok") { - label_set.upsert(label_name!("request_kind"), kind_label_value); - } - label_set.upsert(label_name!("result"), result_label_value); - - match stats_repository - .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &label_set, now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; + // Extendable metrics + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +async fn handle_udp_request_received_event( + context: ConnectionContext, + stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + // Global fixed metrics + match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_requests().await; } - Event::UdpError { context, kind, error } => { - // Increase the number of errors - // code-review: should we ban IP due to other errors too? 
- if let ErrorKind::ConnectionCookie(_msg) = error { - let mut ban_service = ban_service.write().await; - ban_service.increase_counter(&context.client_socket_addr().ip()); - } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_requests().await; + } + } - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_errors().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_errors().await; - } + // Extendable metrics + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +async fn handle_udp_request_accepted_event( + context: ConnectionContext, + kind: UdpRequestKind, + stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + // Global fixed metrics + match kind { + UdpRequestKind::Connect => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_connections().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_connections().await; + } + }, + UdpRequestKind::Announce => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_announces().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_announces().await; + } + }, + UdpRequestKind::Scrape => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_scrapes().await; } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_scrapes().await; + } + }, + } - // Extendable metrics + // Extendable metrics + let mut label_set = LabelSet::from(context); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&kind.to_string())); + match stats_repository + 
.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} - let mut label_set = LabelSet::from(context); +/// # Panics +/// +/// This function panics if the client IP version does not match the expected +/// version. +async fn handle_udp_response_sent_event( + context: ConnectionContext, + kind: UdpResponseKind, + req_processing_time: std::time::Duration, + stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + // Global fixed metrics + match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_responses().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_responses().await; + } + } - if let Some(kind) = kind { - label_set.upsert(label_name!("request_kind"), kind.to_string().into()); + let (result_label_value, kind_label_value) = match kind { + UdpResponseKind::Ok { req_kind } => match req_kind { + UdpRequestKind::Connect => { + let new_avg = stats_repository + .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) + .await; + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + match stats_repository + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, + new_avg, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Connect.to_string())) } + UdpRequestKind::Announce => { + let new_avg = stats_repository + .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) + .await; + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + match stats_repository + .set_gauge( + 
&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, + new_avg, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Announce.to_string())) + } + UdpRequestKind::Scrape => { + let new_avg = stats_repository + .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) + .await; + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + match stats_repository + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, + new_avg, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) + } + }, + UdpResponseKind::Error { opt_req_kind: _ } => (LabelValue::new("error"), LabelValue::ignore()), + }; + + // Extendable metrics + let mut label_set = LabelSet::from(context); + if result_label_value == LabelValue::new("ok") { + label_set.upsert(label_name!("request_kind"), kind_label_value); + } + label_set.upsert(label_name!("result"), result_label_value); + match stats_repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +async fn handle_udp_error_event( + context: ConnectionContext, + kind: Option, + error: ErrorKind, + stats_repository: &Repository, + ban_service: &Arc>, + now: DurationSinceUnixEpoch, +) { + // Increase the number of errors + // code-review: should we ban IP due to other errors too? 
+ if let ErrorKind::ConnectionCookie(_msg) = error { + let mut ban_service = ban_service.write().await; + ban_service.increase_counter(&context.client_socket_addr().ip()); + } - match stats_repository - .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &label_set, now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; + // Global fixed metrics + match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_errors().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_errors().await; } } - tracing::debug!("stats: {:?}", stats_repository.get_stats().await); + // Extendable metrics + let mut label_set = LabelSet::from(context); + if let Some(kind) = kind { + label_set.upsert(label_name!("request_kind"), kind.to_string().into()); + } + match stats_repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } #[cfg(test)] From a8f3a973c661815b7721d87cc24b828915d0deec Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 18:47:46 +0100 Subject: [PATCH 127/247] refactor: [#1551] extract event handler for each udp event --- .../src/statistics/event/handler.rs | 739 ------------------ .../src/statistics/event/handler/error.rs | 95 +++ .../src/statistics/event/handler/mod.rs | 49 ++ .../event/handler/request_aborted.rs | 92 +++ .../event/handler/request_accepted.rs | 236 ++++++ .../event/handler/request_banned.rs | 92 +++ .../event/handler/request_received.rs | 74 ++ .../statistics/event/handler/response_sent.rs | 182 +++++ 8 files changed, 820 insertions(+), 739 deletions(-) delete mode 100644 packages/udp-tracker-server/src/statistics/event/handler.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/handler/error.rs create mode 100644 
packages/udp-tracker-server/src/statistics/event/handler/mod.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/handler/request_received.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs deleted file mode 100644 index a1e9007e9..000000000 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ /dev/null @@ -1,739 +0,0 @@ -use std::sync::Arc; - -use bittorrent_udp_tracker_core::services::banning::BanService; -use tokio::sync::RwLock; -use torrust_tracker_metrics::label::{LabelSet, LabelValue}; -use torrust_tracker_metrics::{label_name, metric_name}; -use torrust_tracker_primitives::DurationSinceUnixEpoch; - -use crate::event::{ConnectionContext, ErrorKind, Event, UdpRequestKind, UdpResponseKind}; -use crate::statistics::repository::Repository; -use crate::statistics::{ - UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, - UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, - UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, - UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, -}; - -#[allow(clippy::too_many_lines)] -pub async fn handle_event( - event: Event, - stats_repository: &Repository, - ban_service: &Arc>, - now: DurationSinceUnixEpoch, -) { - match event { - Event::UdpRequestAborted { context } => { - handle_udp_request_aborted_event(context, stats_repository, now).await; - } - Event::UdpRequestBanned { context } => { - handle_udp_request_banned_event(context, 
stats_repository, now).await; - } - Event::UdpRequestReceived { context } => { - handle_udp_request_received_event(context, stats_repository, now).await; - } - Event::UdpRequestAccepted { context, kind } => { - handle_udp_request_accepted_event(context, kind, stats_repository, now).await; - } - Event::UdpResponseSent { - context, - kind, - req_processing_time, - } => { - handle_udp_response_sent_event(context, kind, req_processing_time, stats_repository, now).await; - } - Event::UdpError { context, kind, error } => { - handle_udp_error_event(context, kind, error, stats_repository, ban_service, now).await; - } - } - - tracing::debug!("stats: {:?}", stats_repository.get_stats().await); -} - -async fn handle_udp_request_aborted_event( - context: ConnectionContext, - stats_repository: &Repository, - now: DurationSinceUnixEpoch, -) { - // Global fixed metrics - stats_repository.increase_udp_requests_aborted().await; - - // Extendable metrics - match stats_repository - .increase_counter( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), - &LabelSet::from(context), - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; -} - -async fn handle_udp_request_banned_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { - // Global fixed metrics - stats_repository.increase_udp_requests_banned().await; - - // Extendable metrics - match stats_repository - .increase_counter( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), - &LabelSet::from(context), - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; -} - -async fn handle_udp_request_received_event( - context: ConnectionContext, - stats_repository: &Repository, - now: DurationSinceUnixEpoch, -) { - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - 
stats_repository.increase_udp4_requests().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_requests().await; - } - } - - // Extendable metrics - match stats_repository - .increase_counter( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), - &LabelSet::from(context), - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; -} - -async fn handle_udp_request_accepted_event( - context: ConnectionContext, - kind: UdpRequestKind, - stats_repository: &Repository, - now: DurationSinceUnixEpoch, -) { - // Global fixed metrics - match kind { - UdpRequestKind::Connect => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_connections().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_connections().await; - } - }, - UdpRequestKind::Announce => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_announces().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_announces().await; - } - }, - UdpRequestKind::Scrape => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_scrapes().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_scrapes().await; - } - }, - } - - // Extendable metrics - let mut label_set = LabelSet::from(context); - label_set.upsert(label_name!("request_kind"), LabelValue::new(&kind.to_string())); - match stats_repository - .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; -} - -/// # Panics -/// -/// This function panics if the client IP version does not match the expected -/// version. 
-async fn handle_udp_response_sent_event( - context: ConnectionContext, - kind: UdpResponseKind, - req_processing_time: std::time::Duration, - stats_repository: &Repository, - now: DurationSinceUnixEpoch, -) { - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_responses().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_responses().await; - } - } - - let (result_label_value, kind_label_value) = match kind { - UdpResponseKind::Ok { req_kind } => match req_kind { - UdpRequestKind::Connect => { - let new_avg = stats_repository - .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) - .await; - let mut label_set = LabelSet::from(context.clone()); - label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Connect.to_string())) - } - UdpRequestKind::Announce => { - let new_avg = stats_repository - .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) - .await; - let mut label_set = LabelSet::from(context.clone()); - label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Announce.to_string())) - } - UdpRequestKind::Scrape => { - let new_avg = stats_repository - .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) - .await; - let mut label_set = 
LabelSet::from(context.clone()); - label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) - } - }, - UdpResponseKind::Error { opt_req_kind: _ } => (LabelValue::new("error"), LabelValue::ignore()), - }; - - // Extendable metrics - let mut label_set = LabelSet::from(context); - if result_label_value == LabelValue::new("ok") { - label_set.upsert(label_name!("request_kind"), kind_label_value); - } - label_set.upsert(label_name!("result"), result_label_value); - match stats_repository - .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &label_set, now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; -} - -async fn handle_udp_error_event( - context: ConnectionContext, - kind: Option, - error: ErrorKind, - stats_repository: &Repository, - ban_service: &Arc>, - now: DurationSinceUnixEpoch, -) { - // Increase the number of errors - // code-review: should we ban IP due to other errors too? 
- if let ErrorKind::ConnectionCookie(_msg) = error { - let mut ban_service = ban_service.write().await; - ban_service.increase_counter(&context.client_socket_addr().ip()); - } - - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_errors().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_errors().await; - } - } - - // Extendable metrics - let mut label_set = LabelSet::from(context); - if let Some(kind) = kind { - label_set.upsert(label_name!("request_kind"), kind.to_string().into()); - } - match stats_repository - .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &label_set, now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; -} - -#[cfg(test)] -mod tests { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::sync::Arc; - - use bittorrent_udp_tracker_core::services::banning::BanService; - use torrust_tracker_clock::clock::Time; - use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; - - use crate::event::{ConnectionContext, ErrorKind, Event, UdpRequestKind}; - use crate::statistics::event::handler::handle_event; - use crate::statistics::repository::Repository; - use crate::CurrentClock; - - #[tokio::test] - async fn should_increase_the_number_of_aborted_requests_when_it_receives_a_udp_request_aborted_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAborted { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - 
assert_eq!(stats.udp_requests_aborted, 1); - } - - #[tokio::test] - async fn should_increase_the_number_of_banned_requests_when_it_receives_a_udp_request_banned_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestBanned { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp_requests_banned, 1); - } - - #[tokio::test] - async fn should_increase_the_number_of_incoming_requests_when_it_receives_a_udp4_incoming_request_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestReceived { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_requests, 1); - } - - #[tokio::test] - async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAborted { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - }, - 
&stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_aborted, 1); - } - #[tokio::test] - async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestBanned { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_banned, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_connect_requests_counter_when_it_receives_a_udp4_request_event_of_connect_kind() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAccepted { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpRequestKind::Connect, - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_announce_requests_counter_when_it_receives_a_udp4_request_event_of_announce_kind() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAccepted { - context: ConnectionContext::new( - 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpRequestKind::Announce, - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_announces_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_scrape_requests_counter_when_it_receives_a_udp4_request_event_of_scrape_kind() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAccepted { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpRequestKind::Scrape, - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_scrapes_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_responses_counter_when_it_receives_a_udp4_response_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpResponseSent { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpResponseKind::Ok { - req_kind: UdpRequestKind::Announce, - }, - req_processing_time: std::time::Duration::from_secs(1), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - 
assert_eq!(stats.udp4_responses, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_errors_counter_when_it_receives_a_udp4_error_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpError { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: None, - error: ErrorKind::RequestParse("Invalid request format".to_string()), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_errors_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_connect_requests_counter_when_it_receives_a_udp6_request_event_of_connect_kind() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAccepted { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpRequestKind::Connect, - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_announce_requests_counter_when_it_receives_a_udp6_request_event_of_announce_kind() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAccepted { - context: ConnectionContext::new( - 
SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpRequestKind::Announce, - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_announces_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_scrape_requests_counter_when_it_receives_a_udp6_request_event_of_scrape_kind() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAccepted { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpRequestKind::Scrape, - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_scrapes_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_response_counter_when_it_receives_a_udp6_response_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpResponseSent { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpResponseKind::Ok { - req_kind: UdpRequestKind::Announce, - }, - req_processing_time: std::time::Duration::from_secs(1), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), 
- ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_responses, 1); - } - #[tokio::test] - async fn should_increase_the_udp6_errors_counter_when_it_receives_a_udp6_error_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpError { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: None, - error: ErrorKind::RequestParse("Invalid request format".to_string()), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_errors_handled, 1); - } -} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/error.rs b/packages/udp-tracker-server/src/statistics/event/handler/error.rs new file mode 100644 index 000000000..e1023a56b --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/error.rs @@ -0,0 +1,95 @@ +use std::sync::Arc; + +use bittorrent_udp_tracker_core::services::banning::BanService; +use tokio::sync::RwLock; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::{ConnectionContext, ErrorKind, UdpRequestKind}; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_ERRORS_TOTAL; + +pub async fn handle_event( + context: ConnectionContext, + kind: Option, + error: ErrorKind, + stats_repository: &Repository, + ban_service: &Arc>, + now: DurationSinceUnixEpoch, +) { + // Increase the number of errors + // code-review: should we ban IP due to other errors too? 
+ if let ErrorKind::ConnectionCookie(_msg) = error { + let mut ban_service = ban_service.write().await; + ban_service.increase_counter(&context.client_socket_addr().ip()); + } + + // Global fixed metrics + match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_errors().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_errors().await; + } + } + + // Extendable metrics + let mut label_set = LabelSet::from(context); + if let Some(kind) = kind { + label_set.upsert(label_name!("request_kind"), kind.to_string().into()); + } + match stats_repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use bittorrent_udp_tracker_core::services::banning::BanService; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::statistics::event::handler::error::ErrorKind; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_udp4_errors_counter_when_it_receives_a_udp4_error_event() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpError { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: None, + error: ErrorKind::RequestParse("Invalid request format".to_string()), + }, + &stats_repository, + &ban_service, + 
CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_errors_handled, 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/mod.rs b/packages/udp-tracker-server/src/statistics/event/handler/mod.rs new file mode 100644 index 000000000..c8ac864a3 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/mod.rs @@ -0,0 +1,49 @@ +mod error; +mod request_aborted; +mod request_accepted; +mod request_banned; +mod request_received; +mod response_sent; + +use std::sync::Arc; + +use bittorrent_udp_tracker_core::services::banning::BanService; +use tokio::sync::RwLock; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::Event; +use crate::statistics::repository::Repository; + +pub async fn handle_event( + event: Event, + stats_repository: &Repository, + ban_service: &Arc>, + now: DurationSinceUnixEpoch, +) { + match event { + Event::UdpRequestAborted { context } => { + request_aborted::handle_event(context, stats_repository, now).await; + } + Event::UdpRequestBanned { context } => { + request_banned::handle_event(context, stats_repository, now).await; + } + Event::UdpRequestReceived { context } => { + request_received::handle_event(context, stats_repository, now).await; + } + Event::UdpRequestAccepted { context, kind } => { + request_accepted::handle_event(context, kind, stats_repository, now).await; + } + Event::UdpResponseSent { + context, + kind, + req_processing_time, + } => { + response_sent::handle_event(context, kind, req_processing_time, stats_repository, now).await; + } + Event::UdpError { context, kind, error } => { + error::handle_event(context, kind, error, stats_repository, ban_service, now).await; + } + } + + tracing::debug!("stats: {:?}", stats_repository.get_stats().await); +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs 
b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs new file mode 100644 index 000000000..270ec2a45 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs @@ -0,0 +1,92 @@ +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::ConnectionContext; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL; + +pub async fn handle_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { + // Global fixed metrics + stats_repository.increase_udp_requests_aborted().await; + + // Extendable metrics + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use bittorrent_udp_tracker_core::services::banning::BanService; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_number_of_aborted_requests_when_it_receives_a_udp_request_aborted_event() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAborted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 
6969), + ) + .unwrap(), + ), + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp_requests_aborted, 1); + } + + #[tokio::test] + async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAborted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + let stats = stats_repository.get_stats().await; + assert_eq!(stats.udp_requests_aborted, 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs new file mode 100644 index 000000000..25c1311e5 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs @@ -0,0 +1,236 @@ +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::{ConnectionContext, UdpRequestKind}; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL; + +pub async fn handle_event( + context: ConnectionContext, + kind: UdpRequestKind, + stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + // Global fixed metrics + match kind { + UdpRequestKind::Connect => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_connections().await; + } + std::net::IpAddr::V6(_) => { + 
stats_repository.increase_udp6_connections().await; + } + }, + UdpRequestKind::Announce => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_announces().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_announces().await; + } + }, + UdpRequestKind::Scrape => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_scrapes().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_scrapes().await; + } + }, + } + + // Extendable metrics + let mut label_set = LabelSet::from(context); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&kind.to_string())); + match stats_repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use bittorrent_udp_tracker_core::services::banning::BanService; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_udp4_connect_requests_counter_when_it_receives_a_udp4_request_event_of_connect_kind() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: 
crate::event::UdpRequestKind::Connect, + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_announce_requests_counter_when_it_receives_a_udp4_request_event_of_announce_kind() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpRequestKind::Announce, + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_scrape_requests_counter_when_it_receives_a_udp4_request_event_of_scrape_kind() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpRequestKind::Scrape, + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_connect_requests_counter_when_it_receives_a_udp6_request_event_of_connect_kind() { + let stats_repository = Repository::new(); + let ban_service = 
Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpRequestKind::Connect, + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_announce_requests_counter_when_it_receives_a_udp6_request_event_of_announce_kind() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpRequestKind::Announce, + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_scrape_requests_counter_when_it_receives_a_udp6_request_event_of_scrape_kind() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: 
crate::event::UdpRequestKind::Scrape, + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_scrapes_handled, 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs new file mode 100644 index 000000000..74641574a --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs @@ -0,0 +1,92 @@ +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::ConnectionContext; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL; + +pub async fn handle_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { + // Global fixed metrics + stats_repository.increase_udp_requests_banned().await; + + // Extendable metrics + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use bittorrent_udp_tracker_core::services::banning::BanService; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_number_of_banned_requests_when_it_receives_a_udp_request_banned_event() { + let stats_repository = Repository::new(); + let ban_service = 
Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestBanned { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp_requests_banned, 1); + } + + #[tokio::test] + async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestBanned { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + let stats = stats_repository.get_stats().await; + assert_eq!(stats.udp_requests_banned, 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs new file mode 100644 index 000000000..8333258c2 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs @@ -0,0 +1,74 @@ +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::ConnectionContext; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL; + +pub async fn handle_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { + // Global fixed metrics + match 
context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_requests().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_requests().await; + } + } + + // Extendable metrics + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use bittorrent_udp_tracker_core::services::banning::BanService; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_number_of_incoming_requests_when_it_receives_a_udp4_incoming_request_event() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestReceived { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_requests, 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs new file mode 100644 index 000000000..a69184e08 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -0,0 +1,182 @@ 
+use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::{ConnectionContext, UdpRequestKind, UdpResponseKind}; +use crate::statistics::repository::Repository; +use crate::statistics::{UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL}; + +pub async fn handle_event( + context: ConnectionContext, + kind: UdpResponseKind, + req_processing_time: std::time::Duration, + stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + // Global fixed metrics + match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_responses().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_responses().await; + } + } + + let (result_label_value, kind_label_value) = match kind { + UdpResponseKind::Ok { req_kind } => match req_kind { + UdpRequestKind::Connect => { + let new_avg = stats_repository + .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) + .await; + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + match stats_repository + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, + new_avg, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Connect.to_string())) + } + UdpRequestKind::Announce => { + let new_avg = stats_repository + .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) + .await; + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + match stats_repository + .set_gauge( + 
&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, + new_avg, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Announce.to_string())) + } + UdpRequestKind::Scrape => { + let new_avg = stats_repository + .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) + .await; + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + match stats_repository + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, + new_avg, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) + } + }, + UdpResponseKind::Error { opt_req_kind: _ } => (LabelValue::new("error"), LabelValue::ignore()), + }; + + // Extendable metrics + let mut label_set = LabelSet::from(context); + if result_label_value == LabelValue::new("ok") { + label_set.upsert(label_name!("request_kind"), kind_label_value); + } + label_set.upsert(label_name!("result"), result_label_value); + match stats_repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use bittorrent_udp_tracker_core::services::banning::BanService; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event, UdpRequestKind}; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + 
#[tokio::test] + async fn should_increase_the_udp4_responses_counter_when_it_receives_a_udp4_response_event() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpResponseSent { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpResponseKind::Ok { + req_kind: UdpRequestKind::Announce, + }, + req_processing_time: std::time::Duration::from_secs(1), + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_responses, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_response_counter_when_it_receives_a_udp6_response_event() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpResponseSent { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpResponseKind::Ok { + req_kind: UdpRequestKind::Announce, + }, + req_processing_time: std::time::Duration::from_secs(1), + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_responses, 1); + } +} From d9f4c13fa860b835dc2299f9d2688a9467faef73 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Jun 2025 10:22:14 +0100 Subject: [PATCH 128/247] refactor: [#1556] extract functions --- .../src/statistics/event/handler/error.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 
deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/event/handler/error.rs b/packages/udp-tracker-server/src/statistics/event/handler/error.rs index e1023a56b..5cd57e12b 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/error.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/error.rs @@ -18,14 +18,17 @@ pub async fn handle_event( ban_service: &Arc>, now: DurationSinceUnixEpoch, ) { - // Increase the number of errors - // code-review: should we ban IP due to other errors too? if let ErrorKind::ConnectionCookie(_msg) = error { let mut ban_service = ban_service.write().await; ban_service.increase_counter(&context.client_socket_addr().ip()); } - // Global fixed metrics + update_global_fixed_metrics(&context, stats_repository).await; + + update_extendable_metrics(&context, kind, stats_repository, now).await; +} + +async fn update_global_fixed_metrics(context: &ConnectionContext, stats_repository: &Repository) { match context.client_socket_addr().ip() { std::net::IpAddr::V4(_) => { stats_repository.increase_udp4_errors().await; @@ -34,9 +37,15 @@ pub async fn handle_event( stats_repository.increase_udp6_errors().await; } } +} - // Extendable metrics - let mut label_set = LabelSet::from(context); +async fn update_extendable_metrics( + context: &ConnectionContext, + kind: Option, + stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + let mut label_set = LabelSet::from(context.clone()); if let Some(kind) = kind { label_set.upsert(label_name!("request_kind"), kind.to_string().into()); } From 7e616d71afe16e82968e56185df45ee695588e8a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Jun 2025 12:12:05 +0100 Subject: [PATCH 129/247] feat: [#1556] add a new metric to count connection ID errors per clietn software The new metric in Prometheous format: ``` udp_tracker_server_connection_id_errors_total{client_software_name="Transmission",client_software_version="0.12"} 2 ``` --- cSpell.json | 1 + 
packages/udp-tracker-server/src/event.rs | 15 ++- .../src/handlers/announce.rs | 55 +++++---- .../udp-tracker-server/src/handlers/mod.rs | 2 +- .../src/server/processor.rs | 17 ++- .../src/statistics/event/handler/error.rs | 105 ++++++++++++++---- .../event/handler/request_accepted.rs | 11 +- .../statistics/event/handler/response_sent.rs | 17 ++- .../udp-tracker-server/src/statistics/mod.rs | 7 ++ 9 files changed, 168 insertions(+), 62 deletions(-) diff --git a/cSpell.json b/cSpell.json index e384a08d9..fcbf53f1f 100644 --- a/cSpell.json +++ b/cSpell.json @@ -127,6 +127,7 @@ "proto", "Quickstart", "Radeon", + "Rakshasa", "Rasterbar", "realpath", "reannounce", diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs index 4fa29940e..152545e6a 100644 --- a/packages/udp-tracker-server/src/event.rs +++ b/packages/udp-tracker-server/src/event.rs @@ -2,6 +2,7 @@ use std::fmt; use std::net::SocketAddr; use std::time::Duration; +use aquatic_udp_protocol::AnnounceRequest; use bittorrent_tracker_core::error::{AnnounceError, ScrapeError}; use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; use bittorrent_udp_tracker_core::services::scrape::UdpScrapeError; @@ -42,15 +43,25 @@ pub enum Event { #[derive(Debug, PartialEq, Eq, Clone)] pub enum UdpRequestKind { Connect, - Announce, + Announce { announce_request: AnnounceRequest }, Scrape, } +impl From for LabelValue { + fn from(kind: UdpRequestKind) -> Self { + match kind { + UdpRequestKind::Connect => LabelValue::new("connect"), + UdpRequestKind::Announce { .. } => LabelValue::new("announce"), + UdpRequestKind::Scrape => LabelValue::new("scrape"), + } + } +} + impl fmt::Display for UdpRequestKind { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let proto_str = match self { UdpRequestKind::Connect => "connect", - UdpRequestKind::Announce => "announce", + UdpRequestKind::Announce { .. 
} => "announce", UdpRequestKind::Scrape => "scrape", }; write!(f, "{proto_str}") diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 2fc3f6e63..901a1434a 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -44,7 +44,9 @@ pub async fn handle_announce( udp_server_stats_event_sender .send(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), - kind: UdpRequestKind::Announce, + kind: UdpRequestKind::Announce { + announce_request: *request, + }, }) .await; } @@ -52,7 +54,15 @@ pub async fn handle_announce( let announce_data = announce_service .handle_announce(client_socket_addr, server_service_binding, request, cookie_valid_range) .await - .map_err(|e| (e.into(), request.transaction_id, UdpRequestKind::Announce))?; + .map_err(|e| { + ( + e.into(), + request.transaction_id, + UdpRequestKind::Announce { + announce_request: *request, + }, + ) + })?; Ok(build_response(client_socket_addr, request, core_config, &announce_data)) } @@ -118,9 +128,9 @@ fn build_response( } #[cfg(test)] -mod tests { +pub(crate) mod tests { - mod announce_request { + pub mod announce_request { use std::net::Ipv4Addr; use std::num::NonZeroU16; @@ -133,7 +143,7 @@ mod tests { use crate::handlers::tests::{sample_ipv4_remote_addr_fingerprint, sample_issue_time}; - struct AnnounceRequestBuilder { + pub struct AnnounceRequestBuilder { request: AnnounceRequest, } @@ -431,13 +441,14 @@ mod tests { let client_socket_addr = sample_ipv4_socket_address(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let announce_request = AnnounceRequestBuilder::default().into(); let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); 
udp_server_stats_event_sender_mock .expect_send() .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), - kind: UdpRequestKind::Announce, + kind: UdpRequestKind::Announce { announce_request }, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -451,7 +462,7 @@ mod tests { &core_udp_tracker_services.announce_service, client_socket_addr, server_service_binding, - &AnnounceRequestBuilder::default().into(), + &announce_request, &core_tracker_services.core_config, &udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -795,12 +806,16 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let announce_request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) + .into(); + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send() .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), - kind: UdpRequestKind::Announce, + kind: UdpRequestKind::Announce { announce_request }, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -810,10 +825,6 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); - let announce_request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) - .into(); - handle_announce( &core_udp_tracker_services.announce_service, client_socket_addr, @@ -887,6 +898,14 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let 
db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); + let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock .expect_send() @@ -912,7 +931,9 @@ mod tests { .expect_send() .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding_clone.clone()), - kind: UdpRequestKind::Announce, + kind: UdpRequestKind::Announce { + announce_request: request, + }, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -926,14 +947,6 @@ mod tests { &db_downloads_metric_repository, )); - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(client_ip_v4) - .with_port(client_port) - .into(); - let core_config = Arc::new(config.core.clone()); let announce_service = Arc::new(AnnounceService::new( diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index c1125b97f..3c8204bf5 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -177,7 +177,7 @@ pub async fn handle_request( ) .await { - Ok(response) => Ok((response, UdpRequestKind::Announce)), + Ok(response) => Ok((response, UdpRequestKind::Announce { announce_request })), Err(err) => Err(err), } } diff --git a/packages/udp-tracker-server/src/server/processor.rs b/packages/udp-tracker-server/src/server/processor.rs index 6b877f85b..dd6ba633d 100644 --- a/packages/udp-tracker-server/src/server/processor.rs +++ 
b/packages/udp-tracker-server/src/server/processor.rs @@ -87,16 +87,15 @@ impl Processor { }; let udp_response_kind = match &response { - Response::Connect(_) => event::UdpResponseKind::Ok { - req_kind: event::UdpRequestKind::Connect, - }, - Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => event::UdpResponseKind::Ok { - req_kind: event::UdpRequestKind::Announce, - }, - Response::Scrape(_) => event::UdpResponseKind::Ok { - req_kind: event::UdpRequestKind::Scrape, - }, Response::Error(_e) => event::UdpResponseKind::Error { opt_req_kind: None }, + _ => { + if let Some(req_kind) = opt_req_kind { + event::UdpResponseKind::Ok { req_kind } + } else { + // code-review: this case should never happen. + event::UdpResponseKind::Error { opt_req_kind } + } + } }; let mut writer = Cursor::new(Vec::with_capacity(200)); diff --git a/packages/udp-tracker-server/src/statistics/event/handler/error.rs b/packages/udp-tracker-server/src/statistics/event/handler/error.rs index 5cd57e12b..7327386a3 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/error.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/error.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use aquatic_udp_protocol::PeerClient; use bittorrent_udp_tracker_core::services::banning::BanService; use tokio::sync::RwLock; use torrust_tracker_metrics::label::LabelSet; @@ -8,54 +9,118 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::{ConnectionContext, ErrorKind, UdpRequestKind}; use crate::statistics::repository::Repository; -use crate::statistics::UDP_TRACKER_SERVER_ERRORS_TOTAL; +use crate::statistics::{UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL, UDP_TRACKER_SERVER_ERRORS_TOTAL}; pub async fn handle_event( - context: ConnectionContext, - kind: Option, - error: ErrorKind, - stats_repository: &Repository, + connection_context: ConnectionContext, + opt_udp_request_kind: Option, + error_kind: ErrorKind, + repository: &Repository, ban_service: &Arc>, now: 
DurationSinceUnixEpoch, ) { - if let ErrorKind::ConnectionCookie(_msg) = error { + if let ErrorKind::ConnectionCookie(_msg) = error_kind.clone() { let mut ban_service = ban_service.write().await; - ban_service.increase_counter(&context.client_socket_addr().ip()); + ban_service.increase_counter(&connection_context.client_socket_addr().ip()); } - update_global_fixed_metrics(&context, stats_repository).await; + update_global_fixed_metrics(&connection_context, repository).await; - update_extendable_metrics(&context, kind, stats_repository, now).await; + update_extendable_metrics(&connection_context, opt_udp_request_kind, error_kind, repository, now).await; } -async fn update_global_fixed_metrics(context: &ConnectionContext, stats_repository: &Repository) { - match context.client_socket_addr().ip() { +async fn update_global_fixed_metrics(connection_context: &ConnectionContext, repository: &Repository) { + match connection_context.client_socket_addr().ip() { std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_errors().await; + repository.increase_udp4_errors().await; } std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_errors().await; + repository.increase_udp6_errors().await; } } } async fn update_extendable_metrics( - context: &ConnectionContext, - kind: Option, - stats_repository: &Repository, + connection_context: &ConnectionContext, + opt_udp_request_kind: Option, + error_kind: ErrorKind, + repository: &Repository, now: DurationSinceUnixEpoch, ) { - let mut label_set = LabelSet::from(context.clone()); - if let Some(kind) = kind { + update_all_errors_counter(connection_context, opt_udp_request_kind.clone(), repository, now).await; + update_connection_id_errors_counter(opt_udp_request_kind, error_kind, repository, now).await; +} + +async fn update_all_errors_counter( + connection_context: &ConnectionContext, + opt_udp_request_kind: Option, + repository: &Repository, + now: DurationSinceUnixEpoch, +) { + let mut label_set = 
LabelSet::from(connection_context.clone()); + + if let Some(kind) = opt_udp_request_kind.clone() { label_set.upsert(label_name!("request_kind"), kind.to_string().into()); } - match stats_repository + + match repository .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &label_set, now) .await { Ok(()) => {} Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; + } +} + +async fn update_connection_id_errors_counter( + opt_udp_request_kind: Option, + error_kind: ErrorKind, + repository: &Repository, + now: DurationSinceUnixEpoch, +) { + if let ErrorKind::ConnectionCookie(_) = error_kind { + if let Some(UdpRequestKind::Announce { announce_request }) = opt_udp_request_kind { + let (client_software_name, client_software_version) = extract_name_and_version(&announce_request.peer_id.client()); + + let label_set = LabelSet::from([ + (label_name!("client_software_name"), client_software_name.into()), + (label_name!("client_software_version"), client_software_version.into()), + ]); + + match repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; + } + } +} + +fn extract_name_and_version(peer_client: &PeerClient) -> (String, String) { + match peer_client { + PeerClient::BitTorrent(compact_string) => ("BitTorrent".to_string(), compact_string.as_str().to_owned()), + PeerClient::Deluge(compact_string) => ("Deluge".to_string(), compact_string.as_str().to_owned()), + PeerClient::LibTorrentRakshasa(compact_string) => ("lt (rakshasa)".to_string(), compact_string.as_str().to_owned()), + PeerClient::LibTorrentRasterbar(compact_string) => ("lt (rasterbar)".to_string(), compact_string.as_str().to_owned()), + PeerClient::QBitTorrent(compact_string) => ("QBitTorrent".to_string(), compact_string.as_str().to_owned()), + PeerClient::Transmission(compact_string) => ("Transmission".to_string(), 
compact_string.as_str().to_owned()), + PeerClient::UTorrent(compact_string) => ("µTorrent".to_string(), compact_string.as_str().to_owned()), + PeerClient::UTorrentEmbedded(compact_string) => ("µTorrent Emb.".to_string(), compact_string.as_str().to_owned()), + PeerClient::UTorrentMac(compact_string) => ("µTorrent Mac".to_string(), compact_string.as_str().to_owned()), + PeerClient::UTorrentWeb(compact_string) => ("µTorrent Web".to_string(), compact_string.as_str().to_owned()), + PeerClient::Vuze(compact_string) => ("Vuze".to_string(), compact_string.as_str().to_owned()), + PeerClient::WebTorrent(compact_string) => ("WebTorrent".to_string(), compact_string.as_str().to_owned()), + PeerClient::WebTorrentDesktop(compact_string) => ("WebTorrent Desktop".to_string(), compact_string.as_str().to_owned()), + PeerClient::Mainline(compact_string) => ("Mainline".to_string(), compact_string.as_str().to_owned()), + PeerClient::OtherWithPrefixAndVersion { prefix, version } => { + (format!("Other ({})", prefix.as_str()), version.as_str().to_owned()) + } + PeerClient::OtherWithPrefix(compact_string) => (format!("Other ({compact_string})"), String::new()), + PeerClient::Other => ("Other".to_string(), String::new()), + _ => ("Unknown".to_string(), String::new()), + } } #[cfg(test)] diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs index 25c1311e5..0007a18b0 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs @@ -22,7 +22,7 @@ pub async fn handle_event( stats_repository.increase_udp6_connections().await; } }, - UdpRequestKind::Announce => match context.client_socket_addr().ip() { + UdpRequestKind::Announce { .. 
} => match context.client_socket_addr().ip() { std::net::IpAddr::V4(_) => { stats_repository.increase_udp4_announces().await; } @@ -62,6 +62,7 @@ mod tests { use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event}; + use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; use crate::CurrentClock; @@ -109,7 +110,9 @@ mod tests { ) .unwrap(), ), - kind: crate::event::UdpRequestKind::Announce, + kind: crate::event::UdpRequestKind::Announce { + announce_request: AnnounceRequestBuilder::default().into(), + }, }, &stats_repository, &ban_service, @@ -193,7 +196,9 @@ mod tests { ) .unwrap(), ), - kind: crate::event::UdpRequestKind::Announce, + kind: crate::event::UdpRequestKind::Announce { + announce_request: AnnounceRequestBuilder::default().into(), + }, }, &stats_repository, &ban_service, diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index a69184e08..0038ac5f9 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -43,9 +43,9 @@ pub async fn handle_event( Ok(()) => {} Err(err) => tracing::error!("Failed to set gauge: {}", err), } - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Connect.to_string())) + (LabelValue::new("ok"), UdpRequestKind::Connect.into()) } - UdpRequestKind::Announce => { + UdpRequestKind::Announce { announce_request } => { let new_avg = stats_repository .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) .await; @@ -63,7 +63,7 @@ pub async fn handle_event( Ok(()) => {} Err(err) => tracing::error!("Failed to set gauge: {}", err), } - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Announce.to_string())) 
+ (LabelValue::new("ok"), UdpRequestKind::Announce { announce_request }.into()) } UdpRequestKind::Scrape => { let new_avg = stats_repository @@ -113,7 +113,8 @@ mod tests { use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; - use crate::event::{ConnectionContext, Event, UdpRequestKind}; + use crate::event::{ConnectionContext, Event}; + use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; use crate::CurrentClock; @@ -134,7 +135,9 @@ mod tests { .unwrap(), ), kind: crate::event::UdpResponseKind::Ok { - req_kind: UdpRequestKind::Announce, + req_kind: crate::event::UdpRequestKind::Announce { + announce_request: AnnounceRequestBuilder::default().into(), + }, }, req_processing_time: std::time::Duration::from_secs(1), }, @@ -165,7 +168,9 @@ mod tests { .unwrap(), ), kind: crate::event::UdpResponseKind::Ok { - req_kind: UdpRequestKind::Announce, + req_kind: crate::event::UdpRequestKind::Announce { + announce_request: AnnounceRequestBuilder::default().into(), + }, }, req_processing_time: std::time::Duration::from_secs(1), }, diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 8f6e9becf..5c30a9abc 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -10,6 +10,7 @@ use torrust_tracker_metrics::unit::Unit; const UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL: &str = "udp_tracker_server_requests_aborted_total"; const UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL: &str = "udp_tracker_server_requests_banned_total"; +const UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL: &str = "udp_tracker_server_connection_id_errors_total"; const UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL: &str = "udp_tracker_server_requests_received_total"; const 
UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL: &str = "udp_tracker_server_requests_accepted_total"; const UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL: &str = "udp_tracker_server_responses_sent_total"; @@ -32,6 +33,12 @@ pub fn describe_metrics() -> Metrics { Some(&MetricDescription::new("Total number of UDP requests banned")), ); + metrics.metric_collection.describe_counter( + &metric_name!(UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("Total number of requests with connection ID errors")), + ); + metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), From d4c43bd3a5bc75704d8b8a5b4641f273968aceb4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Jun 2025 18:46:23 +0100 Subject: [PATCH 130/247] feat: [#1375] add new metric label server_binding_address_type - Label name: `server_binding_address_type` - Label values: `plain`, `v4_mapped_v6` Usage example in Prometheous format: ``` udp_tracker_server_requests_accepted_total{request_kind="connect",server_binding_address_type="plain",server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 1 ``` Example of IPv4-mapped-IPv6 IP: `[::ffff:192.0.2.33]` --- packages/http-tracker-core/src/event.rs | 4 ++ packages/primitives/src/service_binding.rs | 68 +++++++++++++++++++++- packages/udp-tracker-core/src/event.rs | 4 ++ packages/udp-tracker-server/src/event.rs | 4 ++ 4 files changed, 79 insertions(+), 1 deletion(-) diff --git a/packages/http-tracker-core/src/event.rs b/packages/http-tracker-core/src/event.rs index 681f4bbfe..cf969b4ff 100644 --- a/packages/http-tracker-core/src/event.rs +++ b/packages/http-tracker-core/src/event.rs @@ -86,6 +86,10 @@ impl From for LabelSet { label_name!("server_binding_ip"), LabelValue::new(&connection_context.server.service_binding.bind_address().ip().to_string()), ), + ( + label_name!("server_binding_address_type"), + 
LabelValue::new(&connection_context.server.service_binding.bind_address_type().to_string()), + ), ( label_name!("server_binding_port"), LabelValue::new(&connection_context.server.service_binding.bind_address().port().to_string()), diff --git a/packages/primitives/src/service_binding.rs b/packages/primitives/src/service_binding.rs index 30eb1aa9e..d5055130e 100644 --- a/packages/primitives/src/service_binding.rs +++ b/packages/primitives/src/service_binding.rs @@ -4,6 +4,8 @@ use std::net::SocketAddr; use serde::{Deserialize, Serialize}; use url::Url; +const DUAL_STACK_IP_V4_MAPPED_V6_PREFIX: &str = "::ffff:"; + /// Represents the supported network protocols. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] pub enum Protocol { @@ -23,6 +25,29 @@ impl fmt::Display for Protocol { } } +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] +pub enum AddressType { + /// Represents a plain IPv4 or IPv6 address. + Plain, + + /// Represents an IPv6 address that is a mapped IPv4 address. + /// + /// This is used for IPv6 addresses that represent an IPv4 address in a dual-stack network. + /// + /// For example: `[::ffff:192.0.2.33]` + V4MappedV6, +} + +impl fmt::Display for AddressType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let addr_type_str = match self { + Self::Plain => "plain", + Self::V4MappedV6 => "v4_mapped_v6", + }; + write!(f, "{addr_type_str}") + } +} + #[derive(thiserror::Error, Debug, Clone)] pub enum Error { #[error("The port number cannot be zero. It must be an assigned valid port.")] @@ -94,6 +119,15 @@ impl ServiceBinding { self.bind_address } + #[must_use] + pub fn bind_address_type(&self) -> AddressType { + if self.is_v4_mapped_v6() { + return AddressType::V4MappedV6; + } + + AddressType::Plain + } + /// # Panics /// /// It never panics because the URL is always valid. 
@@ -102,6 +136,15 @@ impl ServiceBinding { Url::parse(&format!("{}://{}", self.protocol, self.bind_address)) .expect("Service binding can always be parsed into a URL") } + + fn is_v4_mapped_v6(&self) -> bool { + self.bind_address.ip().is_ipv6() + && self + .bind_address + .ip() + .to_string() + .starts_with(DUAL_STACK_IP_V4_MAPPED_V6_PREFIX) + } } impl From for Url { @@ -126,7 +169,7 @@ mod tests { use rstest::rstest; use url::Url; - use crate::service_binding::{Error, Protocol, ServiceBinding}; + use crate::service_binding::{AddressType, Error, Protocol, ServiceBinding}; #[rstest] #[case("wildcard_ip", Protocol::UDP, SocketAddr::from_str("0.0.0.0:6969").unwrap())] @@ -156,6 +199,29 @@ mod tests { ); } + #[test] + fn should_return_the_bind_address_plain_type_for_ipv4_ips() { + let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap()).unwrap(); + + assert_eq!(service_binding.bind_address_type(), AddressType::Plain); + } + + #[test] + fn should_return_the_bind_address_plain_type_for_ipv6_ips() { + let service_binding = + ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("[0:0:0:0:0:0:0:1]:6969").unwrap()).unwrap(); + + assert_eq!(service_binding.bind_address_type(), AddressType::Plain); + } + + #[test] + fn should_return_the_bind_address_v4_mapped_v7_type_for_ipv4_ips_mapped_to_ipv6() { + let service_binding = + ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("[::ffff:192.0.2.33]:6969").unwrap()).unwrap(); + + assert_eq!(service_binding.bind_address_type(), AddressType::V4MappedV6); + } + #[test] fn should_return_the_corresponding_url() { let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap()).unwrap(); diff --git a/packages/udp-tracker-core/src/event.rs b/packages/udp-tracker-core/src/event.rs index 14a4dbfb3..e9264653e 100644 --- a/packages/udp-tracker-core/src/event.rs +++ b/packages/udp-tracker-core/src/event.rs @@ -59,6 +59,10 @@ impl From for LabelSet 
{ label_name!("server_binding_ip"), LabelValue::new(&connection_context.server_service_binding.bind_address().ip().to_string()), ), + ( + label_name!("server_binding_address_type"), + LabelValue::new(&connection_context.server_service_binding.bind_address_type().to_string()), + ), ( label_name!("server_binding_port"), LabelValue::new(&connection_context.server_service_binding.bind_address().port().to_string()), diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs index 152545e6a..09fc139cb 100644 --- a/packages/udp-tracker-server/src/event.rs +++ b/packages/udp-tracker-server/src/event.rs @@ -118,6 +118,10 @@ impl From for LabelSet { label_name!("server_binding_ip"), LabelValue::new(&connection_context.server_service_binding.bind_address().ip().to_string()), ), + ( + label_name!("server_binding_address_type"), + LabelValue::new(&connection_context.server_service_binding.bind_address_type().to_string()), + ), ( label_name!("server_binding_port"), LabelValue::new(&connection_context.server_service_binding.bind_address().port().to_string()), From 552697b452219100f345bc2696fb760f8f68fd15 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 5 Jun 2025 12:06:24 +0100 Subject: [PATCH 131/247] feat!: [#1514] rename ffield kind to type in JSON metrics "kind" has been renamed to "type" to follow Prometheus name. 
```json { "type": "counter", "name": "http_tracker_core_requests_received_total", "samples": [] } ``` --- packages/metrics/src/metric_collection.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 824397000..4038497d1 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -234,7 +234,7 @@ impl Serialize for MetricCollection { S: Serializer, { #[derive(Serialize)] - #[serde(tag = "kind", rename_all = "lowercase")] + #[serde(tag = "type", rename_all = "lowercase")] enum SerializableMetric<'a> { Counter(&'a Metric), Gauge(&'a Metric), @@ -260,7 +260,7 @@ impl<'de> Deserialize<'de> for MetricCollection { D: Deserializer<'de>, { #[derive(Deserialize)] - #[serde(tag = "kind", rename_all = "lowercase")] + #[serde(tag = "type", rename_all = "lowercase")] enum MetricPayload { Counter(Metric), Gauge(Metric), @@ -540,7 +540,7 @@ mod tests { r#" [ { - "kind":"counter", + "type":"counter", "name":"http_tracker_core_announce_requests_received_total", "samples":[ { @@ -564,7 +564,7 @@ mod tests { ] }, { - "kind":"gauge", + "type":"gauge", "name":"udp_tracker_server_performance_avg_announce_processing_time_ns", "samples":[ { From 2ee3111deebc2970784b65a73d5551d38ec6ec77 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 5 Jun 2025 12:49:09 +0100 Subject: [PATCH 132/247] refactor: [#1514] ensure_metric_exists method to pass the whole metric This will allow to inject also the metric unit and description. 
--- packages/metrics/src/metric/mod.rs | 11 ++++ packages/metrics/src/metric_collection.rs | 64 ++++++++++++++--------- 2 files changed, 51 insertions(+), 24 deletions(-) diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 2118637b8..14704925c 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -30,6 +30,17 @@ impl Metric { } } + /// # Panics + /// + /// This function will panic if the empty sample collection cannot be created. + #[must_use] + pub fn without_samples(name: MetricName) -> Self { + Self { + name, + sample_collection: SampleCollection::new(vec![]).expect("Empty sample collection creation should not fail"), + } + } + #[must_use] pub fn name(&self) -> &MetricName { &self.name diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 4038497d1..d10bcfd7c 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -10,7 +10,6 @@ use super::label::LabelSet; use super::metric::{Metric, MetricName}; use super::prometheus::PrometheusSerializable; use crate::metric::description::MetricDescription; -use crate::sample_collection::SampleCollection; use crate::unit::Unit; use crate::METRICS_TARGET; @@ -59,7 +58,10 @@ impl MetricCollection { pub fn describe_counter(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option<&MetricDescription>) { tracing::info!(target: METRICS_TARGET, type = "counter", name = name.to_string(), unit = ?opt_unit, description = ?opt_description); - self.counters.ensure_metric_exists(name); + + let metric = Metric::::without_samples(name.clone()); + + self.counters.ensure_metric_exists(metric); } #[must_use] @@ -120,14 +122,19 @@ impl MetricCollection { } pub fn ensure_counter_exists(&mut self, name: &MetricName) { - self.counters.ensure_metric_exists(name); + let metric = Metric::::without_samples(name.clone()); + + 
self.counters.ensure_metric_exists(metric); } // Gauge-specific methods pub fn describe_gauge(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option<&MetricDescription>) { tracing::info!(target: METRICS_TARGET, type = "gauge", name = name.to_string(), unit = ?opt_unit, description = ?opt_description); - self.gauges.ensure_metric_exists(name); + + let metric = Metric::::without_samples(name.clone()); + + self.gauges.ensure_metric_exists(metric); } #[must_use] @@ -205,7 +212,9 @@ impl MetricCollection { } pub fn ensure_gauge_exists(&mut self, name: &MetricName) { - self.gauges.ensure_metric_exists(name); + let metric = Metric::::without_samples(name.clone()); + + self.gauges.ensure_metric_exists(metric); } } @@ -336,18 +345,9 @@ impl MetricKindCollection { self.metrics.keys() } - /// # Panics - /// - /// It should not panic as long as empty sample collections are allowed. - pub fn ensure_metric_exists(&mut self, name: &MetricName) { - if !self.metrics.contains_key(name) { - self.metrics.insert( - name.clone(), - Metric::new( - name.clone(), - SampleCollection::new(vec![]).expect("Empty sample collection creation should not fail"), - ), - ); + pub fn ensure_metric_exists(&mut self, metric: Metric) { + if !self.metrics.contains_key(metric.name()) { + self.metrics.insert(metric.name().clone(), metric); } } } @@ -389,7 +389,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist. pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { - self.ensure_metric_exists(name); + let metric = Metric::::without_samples(name.clone()); + + self.ensure_metric_exists(metric); let metric = self.metrics.get_mut(name).expect("Counter metric should exist"); @@ -404,7 +406,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist. 
pub fn absolute(&mut self, name: &MetricName, label_set: &LabelSet, value: u64, time: DurationSinceUnixEpoch) { - self.ensure_metric_exists(name); + let metric = Metric::::without_samples(name.clone()); + + self.ensure_metric_exists(metric); let metric = self.metrics.get_mut(name).expect("Counter metric should exist"); @@ -429,7 +433,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist and it could not be created. pub fn set(&mut self, name: &MetricName, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { - self.ensure_metric_exists(name); + let metric = Metric::::without_samples(name.clone()); + + self.ensure_metric_exists(metric); let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); @@ -444,7 +450,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist and it could not be created. pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { - self.ensure_metric_exists(name); + let metric = Metric::::without_samples(name.clone()); + + self.ensure_metric_exists(metric); let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); @@ -459,7 +467,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist and it could not be created. 
pub fn decrement(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { - self.ensure_metric_exists(name); + let metric = Metric::::without_samples(name.clone()); + + self.ensure_metric_exists(metric); let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); @@ -483,6 +493,7 @@ mod tests { use super::*; use crate::label::LabelValue; use crate::sample::Sample; + use crate::sample_collection::SampleCollection; use crate::tests::{format_prometheus_output, sort_lines}; use crate::{label_name, metric_name}; @@ -731,8 +742,11 @@ mod tests { let mut counters = MetricKindCollection::default(); let mut gauges = MetricKindCollection::default(); - counters.ensure_metric_exists(&metric_name!("test_counter")); - gauges.ensure_metric_exists(&metric_name!("test_gauge")); + let counter = Metric::::without_samples(metric_name!("test_counter")); + counters.ensure_metric_exists(counter); + + let gauge = Metric::::without_samples(metric_name!("test_gauge")); + gauges.ensure_metric_exists(gauge); let metric_collection = MetricCollection::new(counters, gauges).unwrap(); @@ -748,6 +762,7 @@ mod tests { use super::*; use crate::label::LabelValue; use crate::sample::Sample; + use crate::sample_collection::SampleCollection; #[test] fn it_should_increase_a_preexistent_counter() { @@ -845,6 +860,7 @@ mod tests { use super::*; use crate::label::LabelValue; use crate::sample::Sample; + use crate::sample_collection::SampleCollection; #[test] fn it_should_set_a_preexistent_gauge() { From 031bf65fe7b89ca85409a9c156017df100d48c2e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 5 Jun 2025 12:52:31 +0100 Subject: [PATCH 133/247] refactor: [#1514] remove unused code --- packages/metrics/src/metric_collection.rs | 32 ----------------------- 1 file changed, 32 deletions(-) diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index d10bcfd7c..b9e397e5b 100644 --- 
a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -121,12 +121,6 @@ impl MetricCollection { Ok(()) } - pub fn ensure_counter_exists(&mut self, name: &MetricName) { - let metric = Metric::::without_samples(name.clone()); - - self.counters.ensure_metric_exists(metric); - } - // Gauge-specific methods pub fn describe_gauge(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option<&MetricDescription>) { @@ -210,12 +204,6 @@ impl MetricCollection { Ok(()) } - - pub fn ensure_gauge_exists(&mut self, name: &MetricName) { - let metric = Metric::::without_samples(name.clone()); - - self.gauges.ensure_metric_exists(metric); - } } #[derive(thiserror::Error, Debug, Clone)] @@ -813,16 +801,6 @@ mod tests { ); } - #[test] - fn it_should_allow_making_sure_a_counter_exists_without_increasing_it() { - let mut metric_collection = - MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); - - metric_collection.ensure_counter_exists(&metric_name!("test_counter")); - - assert!(metric_collection.contains_counter(&metric_name!("test_counter"))); - } - #[test] fn it_should_allow_describing_a_counter_before_using_it() { let mut metric_collection = @@ -905,16 +883,6 @@ mod tests { ); } - #[test] - fn it_should_allow_making_sure_a_gauge_exists_without_setting_it() { - let mut metric_collection = - MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); - - metric_collection.ensure_gauge_exists(&metric_name!("test_gauge")); - - assert!(metric_collection.contains_gauge(&metric_name!("test_gauge"))); - } - #[test] fn it_should_allow_describing_a_gauge_before_using_it() { let mut metric_collection = From 458497b6460e9069d4e7fdaf27da43864cf0ed2e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 5 Jun 2025 16:04:22 +0100 Subject: [PATCH 134/247] feat: [#1514] add unit and description to metrics It's also shown in the JSON export format. 
```json { "metrics": [ { "type": "counter", "name": "torrent_repository_torrents_downloads_total", "unit": "count", "description": "The total number of torrent downloads.", "samples": [] } } ``` todo: show them in the Prometheus export format. --- .../http-tracker-core/src/statistics/mod.rs | 2 +- packages/metrics/src/metric/mod.rs | 35 ++++++-- packages/metrics/src/metric_collection.rs | 89 +++++++++++++------ packages/metrics/src/unit.rs | 6 +- .../src/statistics/mod.rs | 22 ++--- packages/tracker-core/src/statistics/mod.rs | 2 +- .../udp-tracker-core/src/statistics/mod.rs | 2 +- .../udp-tracker-server/src/statistics/mod.rs | 16 ++-- 8 files changed, 116 insertions(+), 58 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index f949babbd..7181632aa 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -17,7 +17,7 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of HTTP requests received")), + Some(MetricDescription::new("Total number of HTTP requests received")), ); metrics diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 14704925c..eff2c7a5f 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -9,7 +9,9 @@ use super::label::LabelSet; use super::prometheus::PrometheusSerializable; use super::sample_collection::SampleCollection; use crate::gauge::Gauge; +use crate::metric::description::MetricDescription; use crate::sample::Measurement; +use crate::unit::Unit; pub type MetricName = name::MetricName; @@ -17,15 +19,28 @@ pub type MetricName = name::MetricName; pub struct Metric { name: MetricName, + #[serde(rename = "unit")] + opt_unit: Option, + + #[serde(rename = "description")] + 
opt_description: Option, + #[serde(rename = "samples")] sample_collection: SampleCollection, } impl Metric { #[must_use] - pub fn new(name: MetricName, samples: SampleCollection) -> Self { + pub fn new( + name: MetricName, + opt_unit: Option, + opt_description: Option, + samples: SampleCollection, + ) -> Self { Self { name, + opt_unit, + opt_description, sample_collection: samples, } } @@ -34,9 +49,11 @@ impl Metric { /// /// This function will panic if the empty sample collection cannot be created. #[must_use] - pub fn without_samples(name: MetricName) -> Self { + pub fn new_empty_with_name(name: MetricName) -> Self { Self { name, + opt_unit: None, + opt_description: None, sample_collection: SampleCollection::new(vec![]).expect("Empty sample collection creation should not fail"), } } @@ -119,7 +136,7 @@ mod tests { let samples = SampleCollection::::default(); - let metric = Metric::::new(name.clone(), samples); + let metric = Metric::::new(name.clone(), None, None, samples); assert!(metric.is_empty()); } @@ -133,7 +150,7 @@ mod tests { let samples = SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]).unwrap(); - Metric::::new(name.clone(), samples) + Metric::::new(name.clone(), None, None, samples) } #[test] @@ -147,7 +164,7 @@ mod tests { let samples = SampleCollection::::default(); - let metric = Metric::::new(name.clone(), samples); + let metric = Metric::::new(name.clone(), None, None, samples); assert_eq!(metric.number_of_samples(), 0); } @@ -166,7 +183,7 @@ mod tests { let samples = SampleCollection::::default(); - let _metric = Metric::::new(name, samples); + let _metric = Metric::::new(name, None, None, samples); } #[test] @@ -179,7 +196,7 @@ mod tests { let samples = SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]).unwrap(); - let metric = Metric::::new(name.clone(), samples); + let metric = Metric::::new(name.clone(), None, None, samples); 
assert_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1); } @@ -200,7 +217,7 @@ mod tests { let samples = SampleCollection::::default(); - let _metric = Metric::::new(name, samples); + let _metric = Metric::::new(name, None, None, samples); } #[test] @@ -213,7 +230,7 @@ mod tests { let samples = SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set.clone())]).unwrap(); - let metric = Metric::::new(name.clone(), samples); + let metric = Metric::::new(name.clone(), None, None, samples); assert_relative_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1.0); } diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index b9e397e5b..59c0448af 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -10,6 +10,7 @@ use super::label::LabelSet; use super::metric::{Metric, MetricName}; use super::prometheus::PrometheusSerializable; use crate::metric::description::MetricDescription; +use crate::sample_collection::SampleCollection; use crate::unit::Unit; use crate::METRICS_TARGET; @@ -56,12 +57,12 @@ impl MetricCollection { // Counter-specific methods - pub fn describe_counter(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option<&MetricDescription>) { + pub fn describe_counter(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option) { tracing::info!(target: METRICS_TARGET, type = "counter", name = name.to_string(), unit = ?opt_unit, description = ?opt_description); - let metric = Metric::::without_samples(name.clone()); + let metric = Metric::::new(name.clone(), opt_unit, opt_description, SampleCollection::default()); - self.counters.ensure_metric_exists(metric); + self.counters.insert(metric); } #[must_use] @@ -123,12 +124,12 @@ impl MetricCollection { // Gauge-specific methods - pub fn describe_gauge(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option<&MetricDescription>) { + pub 
fn describe_gauge(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option) { tracing::info!(target: METRICS_TARGET, type = "gauge", name = name.to_string(), unit = ?opt_unit, description = ?opt_description); - let metric = Metric::::without_samples(name.clone()); + let metric = Metric::::new(name.clone(), opt_unit, opt_description, SampleCollection::default()); - self.gauges.ensure_metric_exists(metric); + self.gauges.insert(metric); } #[must_use] @@ -333,11 +334,15 @@ impl MetricKindCollection { self.metrics.keys() } - pub fn ensure_metric_exists(&mut self, metric: Metric) { + pub fn insert_if_absent(&mut self, metric: Metric) { if !self.metrics.contains_key(metric.name()) { - self.metrics.insert(metric.name().clone(), metric); + self.insert(metric); } } + + pub fn insert(&mut self, metric: Metric) { + self.metrics.insert(metric.name().clone(), metric); + } } impl MetricKindCollection { @@ -377,9 +382,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist. pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { - let metric = Metric::::without_samples(name.clone()); + let metric = Metric::::new_empty_with_name(name.clone()); - self.ensure_metric_exists(metric); + self.insert_if_absent(metric); let metric = self.metrics.get_mut(name).expect("Counter metric should exist"); @@ -394,9 +399,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist. pub fn absolute(&mut self, name: &MetricName, label_set: &LabelSet, value: u64, time: DurationSinceUnixEpoch) { - let metric = Metric::::without_samples(name.clone()); + let metric = Metric::::new_empty_with_name(name.clone()); - self.ensure_metric_exists(metric); + self.insert_if_absent(metric); let metric = self.metrics.get_mut(name).expect("Counter metric should exist"); @@ -421,9 +426,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist and it could not be created. 
pub fn set(&mut self, name: &MetricName, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { - let metric = Metric::::without_samples(name.clone()); + let metric = Metric::::new_empty_with_name(name.clone()); - self.ensure_metric_exists(metric); + self.insert_if_absent(metric); let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); @@ -438,9 +443,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist and it could not be created. pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { - let metric = Metric::::without_samples(name.clone()); + let metric = Metric::::new_empty_with_name(name.clone()); - self.ensure_metric_exists(metric); + self.insert_if_absent(metric); let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); @@ -455,9 +460,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist and it could not be created. pub fn decrement(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { - let metric = Metric::::without_samples(name.clone()); + let metric = Metric::::new_empty_with_name(name.clone()); - self.ensure_metric_exists(metric); + self.insert_if_absent(metric); let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); @@ -523,11 +528,15 @@ mod tests { MetricCollection::new( MetricKindCollection::new(vec![Metric::new( metric_name!("http_tracker_core_announce_requests_received_total"), + None, + None, SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set_1.clone())]).unwrap(), )]) .unwrap(), MetricKindCollection::new(vec![Metric::new( metric_name!("udp_tracker_server_performance_avg_announce_processing_time_ns"), + None, + None, SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set_1.clone())]).unwrap(), )]) .unwrap(), @@ -541,6 +550,8 @@ mod tests { { "type":"counter", "name":"http_tracker_core_announce_requests_received_total", + 
"unit": null, + "description": null, "samples":[ { "value":1, @@ -565,6 +576,8 @@ mod tests { { "type":"gauge", "name":"udp_tracker_server_performance_avg_announce_processing_time_ns", + "unit": null, + "description": null, "samples":[ { "value":1.0, @@ -603,10 +616,20 @@ mod tests { #[test] fn it_should_not_allow_duplicate_names_across_types() { - let counters = - MetricKindCollection::new(vec![Metric::new(metric_name!("test_metric"), SampleCollection::default())]).unwrap(); - let gauges = - MetricKindCollection::new(vec![Metric::new(metric_name!("test_metric"), SampleCollection::default())]).unwrap(); + let counters = MetricKindCollection::new(vec![Metric::new( + metric_name!("test_metric"), + None, + None, + SampleCollection::default(), + )]) + .unwrap(); + let gauges = MetricKindCollection::new(vec![Metric::new( + metric_name!("test_metric"), + None, + None, + SampleCollection::default(), + )]) + .unwrap(); assert!(MetricCollection::new(counters, gauges).is_err()); } @@ -699,6 +722,8 @@ mod tests { let metric_collection = MetricCollection::new( MetricKindCollection::new(vec![Metric::new( metric_name!("http_tracker_core_announce_requests_received_total"), + None, + None, SampleCollection::new(vec![ Sample::new(Counter::new(1), time, label_set_1.clone()), Sample::new(Counter::new(2), time, label_set_2.clone()), @@ -730,11 +755,11 @@ mod tests { let mut counters = MetricKindCollection::default(); let mut gauges = MetricKindCollection::default(); - let counter = Metric::::without_samples(metric_name!("test_counter")); - counters.ensure_metric_exists(counter); + let counter = Metric::::new_empty_with_name(metric_name!("test_counter")); + counters.insert_if_absent(counter); - let gauge = Metric::::without_samples(metric_name!("test_gauge")); - gauges.ensure_metric_exists(gauge); + let gauge = Metric::::new_empty_with_name(metric_name!("test_gauge")); + gauges.insert_if_absent(gauge); let metric_collection = MetricCollection::new(counters, gauges).unwrap(); @@ -760,6 
+785,8 @@ mod tests { let mut metric_collection = MetricCollection::new( MetricKindCollection::new(vec![Metric::new( metric_name!("test_counter"), + None, + None, SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(), )]) .unwrap(), @@ -819,10 +846,14 @@ mod tests { let result = MetricKindCollection::new(vec![ Metric::new( metric_name!("test_counter"), + None, + None, SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(), ), Metric::new( metric_name!("test_counter"), + None, + None, SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(), ), ]); @@ -849,6 +880,8 @@ mod tests { MetricKindCollection::default(), MetricKindCollection::new(vec![Metric::new( metric_name!("test_gauge"), + None, + None, SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(), )]) .unwrap(), @@ -901,10 +934,14 @@ mod tests { let result = MetricKindCollection::new(vec![ Metric::new( metric_name!("test_gauge"), + None, + None, SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(), ), Metric::new( metric_name!("test_gauge"), + None, + None, SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(), ), ]); diff --git a/packages/metrics/src/unit.rs b/packages/metrics/src/unit.rs index f7a528bed..43b42bf79 100644 --- a/packages/metrics/src/unit.rs +++ b/packages/metrics/src/unit.rs @@ -4,7 +4,11 @@ //! The `Unit` enum is used to specify the unit of measurement for metrics. //! //! They were copied from the `metrics` crate, to allow future compatibility. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] pub enum Unit { Count, Percent, diff --git a/packages/swarm-coordination-registry/src/statistics/mod.rs b/packages/swarm-coordination-registry/src/statistics/mod.rs index cfc252e34..6505a2db2 100644 --- a/packages/swarm-coordination-registry/src/statistics/mod.rs +++ b/packages/swarm-coordination-registry/src/statistics/mod.rs @@ -36,31 +36,31 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of torrents added.")), + Some(MetricDescription::new("The total number of torrents added.")), ); metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of torrents removed.")), + Some(MetricDescription::new("The total number of torrents removed.")), ); metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of torrents.")), + Some(MetricDescription::new("The total number of torrents.")), ); metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of torrent downloads.")), + Some(MetricDescription::new("The total number of torrent downloads.")), ); metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of inactive torrents.")), + Some(MetricDescription::new("The total number of inactive torrents.")), ); // Peers metrics @@ -68,25 +68,25 @@ pub fn describe_metrics() -> Metrics { 
metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_PEERS_ADDED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of peers added.")), + Some(MetricDescription::new("The total number of peers added.")), ); metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of peers removed.")), + Some(MetricDescription::new("The total number of peers removed.")), ); metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of peers updated.")), + Some(MetricDescription::new("The total number of peers updated.")), ); metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new( + Some(MetricDescription::new( "The total number of peer connections (one connection per torrent).", )), ); @@ -94,13 +94,13 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of unique peers.")), + Some(MetricDescription::new("The total number of unique peers.")), ); metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of inactive peers.")), + Some(MetricDescription::new("The total number of inactive peers.")), ); metrics diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs index ff8187379..fdb8e8fd4 100644 --- a/packages/tracker-core/src/statistics/mod.rs +++ b/packages/tracker-core/src/statistics/mod.rs @@ -21,7 +21,7 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( 
&metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of torrent downloads (persisted).")), + Some(MetricDescription::new("The total number of torrent downloads (persisted).")), ); metrics diff --git a/packages/udp-tracker-core/src/statistics/mod.rs b/packages/udp-tracker-core/src/statistics/mod.rs index 9eb85d7f1..fec76069e 100644 --- a/packages/udp-tracker-core/src/statistics/mod.rs +++ b/packages/udp-tracker-core/src/statistics/mod.rs @@ -17,7 +17,7 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of UDP requests received")), + Some(MetricDescription::new("Total number of UDP requests received")), ); metrics diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 5c30a9abc..a7da2dc63 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -24,49 +24,49 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of UDP requests aborted")), + Some(MetricDescription::new("Total number of UDP requests aborted")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of UDP requests banned")), + Some(MetricDescription::new("Total number of UDP requests banned")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of requests with connection ID errors")), + Some(MetricDescription::new("Total number of requests with connection ID 
errors")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of UDP requests received")), + Some(MetricDescription::new("Total number of UDP requests received")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of UDP requests accepted")), + Some(MetricDescription::new("Total number of UDP requests accepted")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of UDP responses sent")), + Some(MetricDescription::new("Total number of UDP responses sent")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of errors processing UDP requests")), + Some(MetricDescription::new("Total number of errors processing UDP requests")), ); metrics.metric_collection.describe_gauge( &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), Some(Unit::Nanoseconds), - Some(&MetricDescription::new( + Some(MetricDescription::new( "Average time to process a UDP connect request in nanoseconds", )), ); From 842739ff95d33f1b47afc9b71459b3f8671ed175 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 11:18:02 +0100 Subject: [PATCH 135/247] feat: [#1514] add HELP and TYPE to Prometheus metric export --- packages/metrics/src/metric/mod.rs | 44 +++++++++++++++++++++-- packages/metrics/src/metric_collection.rs | 4 +++ 2 files changed, 45 insertions(+), 3 deletions(-) diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index eff2c7a5f..08f7dd485 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -103,18 +103,56 @@ impl Metric { } } -impl
PrometheusSerializable for Metric { +impl PrometheusSerializable for Metric { fn to_prometheus(&self) -> String { let samples: Vec = self .sample_collection .iter() .map(|(label_set, sample)| { - format!( + let help = if let Some(description) = &self.opt_description { + format!("# HELP {description}\n") + } else { + String::new() + }; + + let kind = format!("# TYPE {} counter\n", self.name.to_prometheus()); + + let metric = format!( "{}{} {}", self.name.to_prometheus(), label_set.to_prometheus(), sample.value().to_prometheus() - ) + ); + + format!("{help}{kind}{metric}") + }) + .collect(); + samples.join("\n") + } +} + +impl PrometheusSerializable for Metric { + fn to_prometheus(&self) -> String { + let samples: Vec = self + .sample_collection + .iter() + .map(|(label_set, sample)| { + let help = if let Some(description) = &self.opt_description { + format!("# HELP {description}\n") + } else { + String::new() + }; + + let kind = format!("# TYPE {} gauge\n", self.name.to_prometheus()); + + let metric = format!( + "{}{} {}", + self.name.to_prometheus(), + label_set.to_prometheus(), + sample.value().to_prometheus() + ); + + format!("{help}{kind}{metric}") }) .collect(); samples.join("\n") diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 59c0448af..23b7609f6 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -607,7 +607,9 @@ mod tests { fn prometheus() -> String { format_prometheus_output( r#" + # TYPE http_tracker_core_announce_requests_received_total counter http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 + # TYPE udp_tracker_server_performance_avg_announce_processing_time_ns gauge udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 "#, ) @@ -739,7 +741,9 @@ mod tests { 
let expected_prometheus_output = format_prometheus_output( r#" + # TYPE http_tracker_core_announce_requests_received_total counter http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7171",server_binding_protocol="http"} 2 + # TYPE http_tracker_core_announce_requests_received_total counter http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 "#, ); From ed1322b7a8be7cf039d01aaab112eefe01e55ba0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 11:25:48 +0100 Subject: [PATCH 136/247] refactor: [#1514] reorganize SampleCollection tests --- packages/metrics/src/sample_collection.rs | 90 +++++++++++++---------- 1 file changed, 53 insertions(+), 37 deletions(-) diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs index e815f26ec..a87aacb63 100644 --- a/packages/metrics/src/sample_collection.rs +++ b/packages/metrics/src/sample_collection.rs @@ -168,10 +168,8 @@ mod tests { use crate::counter::Counter; use crate::label::LabelSet; - use crate::prometheus::PrometheusSerializable; use crate::sample::Sample; use crate::sample_collection::SampleCollection; - use crate::tests::format_prometheus_output; fn sample_update_time() -> DurationSinceUnixEpoch { DurationSinceUnixEpoch::from_secs(1_743_552_000) @@ -242,56 +240,74 @@ mod tests { assert!(!collection.is_empty()); } - #[test] - fn it_should_be_serializable_and_deserializable_for_json_format() { - let sample = Sample::new(Counter::default(), sample_update_time(), LabelSet::default()); - let collection = SampleCollection::new(vec![sample]).unwrap(); + mod json_serialization { + use crate::counter::Counter; + use crate::label::LabelSet; + use crate::sample::Sample; + use crate::sample_collection::tests::sample_update_time; + use crate::sample_collection::SampleCollection; - let serialized = serde_json::to_string(&collection).unwrap(); 
- let deserialized: SampleCollection = serde_json::from_str(&serialized).unwrap(); + #[test] + fn it_should_be_serializable_and_deserializable_for_json_format() { + let sample = Sample::new(Counter::default(), sample_update_time(), LabelSet::default()); + let collection = SampleCollection::new(vec![sample]).unwrap(); - assert_eq!(deserialized, collection); - } + let serialized = serde_json::to_string(&collection).unwrap(); + let deserialized: SampleCollection = serde_json::from_str(&serialized).unwrap(); - #[test] - fn it_should_fail_deserializing_from_json_with_duplicate_label_sets() { - let samples = vec![ - Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), - Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), - ]; + assert_eq!(deserialized, collection); + } - let serialized = serde_json::to_string(&samples).unwrap(); + #[test] + fn it_should_fail_deserializing_from_json_with_duplicate_label_sets() { + let samples = vec![ + Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), + Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), + ]; - let result: Result, _> = serde_json::from_str(&serialized); + let serialized = serde_json::to_string(&samples).unwrap(); - assert!(result.is_err()); + let result: Result, _> = serde_json::from_str(&serialized); + + assert!(result.is_err()); + } } - #[test] - fn it_should_be_exportable_to_prometheus_format_when_empty() { - let sample = Sample::new(Counter::default(), sample_update_time(), LabelSet::default()); - let collection = SampleCollection::new(vec![sample]).unwrap(); + mod prometheus_serialization { + use crate::counter::Counter; + use crate::label::LabelSet; + use crate::prometheus::PrometheusSerializable; + use crate::sample::Sample; + use crate::sample_collection::tests::sample_update_time; + use crate::sample_collection::SampleCollection; + use crate::tests::format_prometheus_output; - let prometheus_output = 
collection.to_prometheus(); + #[test] + fn it_should_be_exportable_to_prometheus_format_when_empty() { + let sample = Sample::new(Counter::default(), sample_update_time(), LabelSet::default()); + let collection = SampleCollection::new(vec![sample]).unwrap(); - assert!(!prometheus_output.is_empty()); - } + let prometheus_output = collection.to_prometheus(); - #[test] - fn it_should_be_exportable_to_prometheus_format() { - let sample = Sample::new( - Counter::new(1), - sample_update_time(), - LabelSet::from(vec![("labe_name_1", "label value value 1")]), - ); + assert!(!prometheus_output.is_empty()); + } - let collection = SampleCollection::new(vec![sample]).unwrap(); + #[test] + fn it_should_be_exportable_to_prometheus_format() { + let sample = Sample::new( + Counter::new(1), + sample_update_time(), + LabelSet::from(vec![("labe_name_1", "label value value 1")]), + ); - let prometheus_output = collection.to_prometheus(); + let collection = SampleCollection::new(vec![sample]).unwrap(); - let expected_prometheus_output = format_prometheus_output("{labe_name_1=\"label value value 1\"} 1"); + let prometheus_output = collection.to_prometheus(); - assert_eq!(prometheus_output, expected_prometheus_output); + let expected_prometheus_output = format_prometheus_output("{labe_name_1=\"label value value 1\"} 1"); + + assert_eq!(prometheus_output, expected_prometheus_output); + } } #[cfg(test)] From a89406daad7c308639635fa234d39b27ec41085b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 12:49:34 +0100 Subject: [PATCH 137/247] refactor: [#1514] remove duplicate code in Metric type --- packages/metrics/src/metric/mod.rs | 117 ++++++++++++++++++++--------- 1 file changed, 83 insertions(+), 34 deletions(-) diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 08f7dd485..a97621da8 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -103,28 +103,92 @@ impl Metric { } } +/// 
`PrometheusMetricSample` is a wrapper around types that provides methods to +/// convert the metric and its measurement into a Prometheus-compatible format. +/// +/// In Prometheus, a metric is a time series that consists of a name, a set of +/// labels, and a value. The sample value needs data from the `Metric` and +/// `Measurement` structs, as well as the `LabelSet` that defines the labels for +/// the metric. +struct PrometheusMetricSample<'a, T> { + metric: &'a Metric, + measurement: &'a Measurement, + label_set: &'a LabelSet, +} + +enum PrometheusType { + Counter, + Gauge, +} + +impl PrometheusSerializable for PrometheusType { + fn to_prometheus(&self) -> String { + match self { + PrometheusType::Counter => "counter".to_string(), + PrometheusType::Gauge => "gauge".to_string(), + } + } +} + +impl PrometheusMetricSample<'_, T> { + fn to_prometheus(&self, prometheus_type: &PrometheusType) -> String { + format!( + "{}{}{}", + self.help_line(), + self.type_line(prometheus_type), + self.metric_line() + ) + } + + fn help_line(&self) -> String { + if let Some(description) = &self.metric.opt_description { + format!("# HELP {description}\n") + } else { + String::new() + } + } + + fn type_line(&self, kind: &PrometheusType) -> String { + format!("# TYPE {} {}\n", self.metric.name().to_prometheus(), kind.to_prometheus()) + } + + fn metric_line(&self) -> String { + format!( + "{}{} {}", + self.metric.name.to_prometheus(), + self.label_set.to_prometheus(), + self.measurement.value().to_prometheus() + ) + } +} + +impl<'a> PrometheusMetricSample<'a, Counter> { + pub fn new(metric: &'a Metric, measurement: &'a Measurement, label_set: &'a LabelSet) -> Self { + Self { + metric, + measurement, + label_set, + } + } +} + +impl<'a> PrometheusMetricSample<'a, Gauge> { + pub fn new(metric: &'a Metric, measurement: &'a Measurement, label_set: &'a LabelSet) -> Self { + Self { + metric, + measurement, + label_set, + } + } +} + impl PrometheusSerializable for Metric { fn 
to_prometheus(&self) -> String { let samples: Vec = self .sample_collection .iter() - .map(|(label_set, sample)| { - let help = if let Some(description) = &self.opt_description { - format!("# HELP {description}\n") - } else { - String::new() - }; - - let kind = format!("# TYPE {} counter\n", self.name.to_prometheus()); - - let metric = format!( - "{}{} {}", - self.name.to_prometheus(), - label_set.to_prometheus(), - sample.value().to_prometheus() - ); - - format!("{help}{kind}{metric}") + .map(|(label_set, measurement)| { + PrometheusMetricSample::::new(self, measurement, label_set).to_prometheus(&PrometheusType::Counter) }) .collect(); samples.join("\n") @@ -136,23 +200,8 @@ impl PrometheusSerializable for Metric { let samples: Vec = self .sample_collection .iter() - .map(|(label_set, sample)| { - let help = if let Some(description) = &self.opt_description { - format!("# HELP {description}\n") - } else { - String::new() - }; - - let kind = format!("# TYPE {} gauge\n", self.name.to_prometheus()); - - let metric = format!( - "{}{} {}", - self.name.to_prometheus(), - label_set.to_prometheus(), - sample.value().to_prometheus() - ); - - format!("{help}{kind}{metric}") + .map(|(label_set, measurement)| { + PrometheusMetricSample::::new(self, measurement, label_set).to_prometheus(&PrometheusType::Gauge) }) .collect(); samples.join("\n") From 748e6a50e6f18324d2587eddb3fc43f626fb3876 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 15:41:07 +0100 Subject: [PATCH 138/247] test: [#1514] add tests to metrics package --- packages/metrics/src/label/set.rs | 200 +++++++++++++++++++++------- packages/metrics/src/label/value.rs | 59 ++++++++ 2 files changed, 211 insertions(+), 48 deletions(-) diff --git a/packages/metrics/src/label/set.rs b/packages/metrics/src/label/set.rs index 2b6334fc7..1c2c3e27e 100644 --- a/packages/metrics/src/label/set.rs +++ b/packages/metrics/src/label/set.rs @@ -175,6 +175,7 @@ impl PrometheusSerializable for LabelSet { mod tests { use 
std::collections::BTreeMap; + use std::hash::{DefaultHasher, Hash}; use pretty_assertions::assert_eq; @@ -195,54 +196,6 @@ mod tests { ] } - #[test] - fn it_should_allow_instantiation_from_an_array_of_label_pairs() { - let label_set: LabelSet = sample_array_of_label_pairs().into(); - - assert_eq!( - label_set, - LabelSet { - items: BTreeMap::from(sample_array_of_label_pairs()) - } - ); - } - - #[test] - fn it_should_allow_instantiation_from_a_vec_of_label_pairs() { - let label_set: LabelSet = sample_vec_of_label_pairs().into(); - - assert_eq!( - label_set, - LabelSet { - items: BTreeMap::from(sample_array_of_label_pairs()) - } - ); - } - - #[test] - fn it_should_allow_instantiation_from_a_b_tree_map() { - let label_set: LabelSet = BTreeMap::from(sample_array_of_label_pairs()).into(); - - assert_eq!( - label_set, - LabelSet { - items: BTreeMap::from(sample_array_of_label_pairs()) - } - ); - } - - #[test] - fn it_should_allow_instantiation_from_a_label_pair() { - let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); - - assert_eq!( - label_set, - LabelSet { - items: BTreeMap::from([(label_name!("label_name"), LabelValue::new("value"))]) - } - ); - } - #[test] fn it_should_allow_inserting_a_new_label_pair() { let mut label_set = LabelSet::default(); @@ -338,4 +291,155 @@ mod tests { assert_eq!(label_set.to_string(), r#"{label_name="label value"}"#); } + + #[test] + fn it_should_allow_instantiation_from_an_array_of_label_pairs() { + let label_set: LabelSet = sample_array_of_label_pairs().into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from(sample_array_of_label_pairs()) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_a_vec_of_label_pairs() { + let label_set: LabelSet = sample_vec_of_label_pairs().into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from(sample_array_of_label_pairs()) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_a_b_tree_map() { + let label_set: 
LabelSet = BTreeMap::from(sample_array_of_label_pairs()).into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from(sample_array_of_label_pairs()) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_a_label_pair() { + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from([(label_name!("label_name"), LabelValue::new("value"))]) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_vec_of_str_tuples() { + let label_set: LabelSet = vec![("foo", "bar"), ("baz", "qux")].into(); + + let mut expected = BTreeMap::new(); + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + #[test] + fn it_should_allow_instantiation_from_vec_of_string_tuples() { + let label_set: LabelSet = vec![("foo".to_string(), "bar".to_string()), ("baz".to_string(), "qux".to_string())].into(); + + let mut expected = BTreeMap::new(); + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + #[test] + fn it_should_allow_instantiation_from_vec_of_serialized_label() { + use super::SerializedLabel; + let label_set: LabelSet = vec![ + SerializedLabel { + name: LabelName::new("foo"), + value: LabelValue::new("bar"), + }, + SerializedLabel { + name: LabelName::new("baz"), + value: LabelValue::new("qux"), + }, + ] + .into(); + + let mut expected = BTreeMap::new(); + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + #[test] + fn it_should_allow_instantiation_from_array_of_string_tuples() { + let arr: [(String, String); 2] = [("foo".to_string(), 
"bar".to_string()), ("baz".to_string(), "qux".to_string())]; + let label_set: LabelSet = arr.into(); + + let mut expected = BTreeMap::new(); + + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + #[test] + fn it_should_allow_instantiation_from_array_of_str_tuples() { + let arr: [(&str, &str); 2] = [("foo", "bar"), ("baz", "qux")]; + let label_set: LabelSet = arr.into(); + + let mut expected = BTreeMap::new(); + + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + #[test] + fn it_should_be_comparable() { + let a: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + let b: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + let c: LabelSet = (label_name!("y"), LabelValue::new("2")).into(); + + assert_eq!(a, b); + assert_ne!(a, c); + } + + #[test] + fn it_should_be_allow_ordering() { + let a: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + let b: LabelSet = (label_name!("y"), LabelValue::new("2")).into(); + + assert!(a < b); + } + + #[test] + fn it_should_be_hashable() { + let a: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + + let mut hasher = DefaultHasher::new(); + + a.hash(&mut hasher); + } + + #[test] + fn it_should_implement_clone() { + let a: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + let _unused = a.clone(); + } } diff --git a/packages/metrics/src/label/value.rs b/packages/metrics/src/label/value.rs index ffdbce333..4f25844a8 100644 --- a/packages/metrics/src/label/value.rs +++ b/packages/metrics/src/label/value.rs @@ -33,6 +33,9 @@ impl From for LabelValue { #[cfg(test)] mod tests { + use std::collections::hash_map::DefaultHasher; + use std::hash::Hash; + use crate::label::value::LabelValue; use 
crate::prometheus::PrometheusSerializable; @@ -41,4 +44,60 @@ mod tests { let label_value = LabelValue::new("value"); assert_eq!(label_value.to_prometheus(), "value"); } + + #[test] + fn it_could_be_initialized_from_str() { + let lv = LabelValue::new("abc"); + assert_eq!(lv.0, "abc"); + } + + #[test] + fn it_should_allow_to_create_an_ignored_label_value() { + let lv = LabelValue::ignore(); + assert_eq!(lv.0, ""); + } + + #[test] + fn it_should_be_converted_from_string() { + let s = String::from("foo"); + let lv: LabelValue = s.clone().into(); + assert_eq!(lv.0, s); + } + + #[test] + fn it_should_be_comparable() { + let a = LabelValue::new("x"); + let b = LabelValue::new("x"); + let c = LabelValue::new("y"); + + assert_eq!(a, b); + assert_ne!(a, c); + } + + #[test] + fn it_should_be_allow_ordering() { + let a = LabelValue::new("x"); + let b = LabelValue::new("y"); + + assert!(a < b); + } + + #[test] + fn it_should_be_hashable() { + let a = LabelValue::new("x"); + let mut hasher = DefaultHasher::new(); + a.hash(&mut hasher); + } + + #[test] + fn it_should_implement_clone() { + let a = LabelValue::new("x"); + let _unused = a.clone(); + } + + #[test] + fn it_should_implement_display() { + let a = LabelValue::new("x"); + assert_eq!(format!("{a}"), "x"); + } } From 642d7742ea44dfd65db0ce840dc33053c0ce53dd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 16:15:11 +0100 Subject: [PATCH 139/247] fix: [#1514] HELP line in Prometheus export must contain metric name Format for each metric sample: {label_set} Example: ``` udp_tracker_server_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="6868",server_binding_protocol="udp"} 36661 ``` See https://prometheus.io/docs/instrumenting/exposition_formats/#comments-help-text-and-type-information --- packages/metrics/src/metric/description.rs | 13 +++++++++++++ packages/metrics/src/metric/mod.rs | 12 +++++++++++- packages/metrics/src/metric_collection.rs | 10 ++++++---- 3 files changed, 30
insertions(+), 5 deletions(-) diff --git a/packages/metrics/src/metric/description.rs b/packages/metrics/src/metric/description.rs index 8a50dee90..6a0ca3432 100644 --- a/packages/metrics/src/metric/description.rs +++ b/packages/metrics/src/metric/description.rs @@ -1,6 +1,8 @@ use derive_more::Display; use serde::{Deserialize, Serialize}; +use crate::prometheus::PrometheusSerializable; + #[derive(Debug, Display, Clone, Eq, PartialEq, Default, Deserialize, Serialize, Hash, Ord, PartialOrd)] pub struct MetricDescription(String); @@ -11,6 +13,11 @@ impl MetricDescription { } } +impl PrometheusSerializable for MetricDescription { + fn to_prometheus(&self) -> String { + self.0.clone() + } +} #[cfg(test)] mod tests { use super::*; @@ -21,6 +28,12 @@ mod tests { assert_eq!(metric.0, "Metric description"); } + #[test] + fn it_serializes_to_prometheus() { + let label_value = MetricDescription::new("name"); + assert_eq!(label_value.to_prometheus(), "name"); + } + #[test] fn it_should_be_displayed() { let metric = MetricDescription::new("Metric description"); diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index a97621da8..f3278d98c 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -133,6 +133,10 @@ impl PrometheusSerializable for PrometheusType { impl PrometheusMetricSample<'_, T> { fn to_prometheus(&self, prometheus_type: &PrometheusType) -> String { format!( + // Format: + // # HELP + // # TYPE + // {label_set} "{}{}{}", self.help_line(), self.type_line(prometheus_type), @@ -142,7 +146,12 @@ impl PrometheusMetricSample<'_, T> { fn help_line(&self) -> String { if let Some(description) = &self.metric.opt_description { - format!("# HELP {description}\n") + format!( + // Format: # HELP + "# HELP {} {}\n", + self.metric.name().to_prometheus(), + description.to_prometheus() + ) } else { String::new() } @@ -154,6 +163,7 @@ impl PrometheusMetricSample<'_, T> { fn metric_line(&self) -> String { format!( 
+ // Format: {label_set} "{}{} {}", self.metric.name.to_prometheus(), self.label_set.to_prometheus(), diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 23b7609f6..122895478 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -529,14 +529,14 @@ mod tests { MetricKindCollection::new(vec![Metric::new( metric_name!("http_tracker_core_announce_requests_received_total"), None, - None, + Some(MetricDescription::new("The number of announce requests received.")), SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set_1.clone())]).unwrap(), )]) .unwrap(), MetricKindCollection::new(vec![Metric::new( metric_name!("udp_tracker_server_performance_avg_announce_processing_time_ns"), None, - None, + Some(MetricDescription::new("The average announce processing time in nanoseconds.")), SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set_1.clone())]).unwrap(), )]) .unwrap(), @@ -551,7 +551,7 @@ mod tests { "type":"counter", "name":"http_tracker_core_announce_requests_received_total", "unit": null, - "description": null, + "description": "The number of announce requests received.", "samples":[ { "value":1, @@ -577,7 +577,7 @@ mod tests { "type":"gauge", "name":"udp_tracker_server_performance_avg_announce_processing_time_ns", "unit": null, - "description": null, + "description": "The average announce processing time in nanoseconds.", "samples":[ { "value":1.0, @@ -607,8 +607,10 @@ mod tests { fn prometheus() -> String { format_prometheus_output( r#" + # HELP http_tracker_core_announce_requests_received_total The number of announce requests received. 
# TYPE http_tracker_core_announce_requests_received_total counter http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 + # HELP udp_tracker_server_performance_avg_announce_processing_time_ns The average announce processing time in nanoseconds. # TYPE udp_tracker_server_performance_avg_announce_processing_time_ns gauge udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 "#, From 376f242166725f682c4b80502535b27b88fcb52c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 16:31:07 +0100 Subject: [PATCH 140/247] test: [#1514] add tests to metrics package --- packages/metrics/src/metric/mod.rs | 51 +++++++++++++++++++++++++----- 1 file changed, 43 insertions(+), 8 deletions(-) diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index f3278d98c..6f254023f 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -286,14 +286,25 @@ mod tests { #[test] fn it_should_allow_incrementing_a_sample() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); - let name = metric_name!("test_metric"); - let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); + let samples = SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::::new(name.clone(), None, None, samples); - let samples = SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]).unwrap(); + metric.increment(&label_set, time); - let metric = Metric::::new(name.clone(), None, None, samples); + assert_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1); + } + + #[test] + fn it_should_allow_setting_to_an_absolute_value() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let name = 
metric_name!("test_metric"); + let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); + let samples = SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::::new(name.clone(), None, None, samples); + + metric.absolute(&label_set, 1, time); assert_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1); } @@ -318,16 +329,40 @@ mod tests { } #[test] - fn it_should_allow_setting_a_sample() { + fn it_should_allow_incrementing_a_sample() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); - let name = metric_name!("test_metric"); - let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); + let samples = SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::::new(name.clone(), None, None, samples); + metric.increment(&label_set, time); + + assert_relative_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1.0); + } + + #[test] + fn it_should_allow_decrement_a_sample() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let name = metric_name!("test_metric"); + let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); let samples = SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::::new(name.clone(), None, None, samples); - let metric = Metric::::new(name.clone(), None, None, samples); + metric.decrement(&label_set, time); + + assert_relative_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 0.0); + } + + #[test] + fn it_should_allow_setting_a_sample() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let name = metric_name!("test_metric"); + let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); + let samples = 
SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::::new(name.clone(), None, None, samples); + + metric.set(&label_set, 1.0, time); assert_relative_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1.0); } From 507b48035daac72b6ae5c22394fc7198fe3fee02 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 17:01:43 +0100 Subject: [PATCH 141/247] fix: [#1514] bug. Don't allow merging metric collections with the same metric name It was not possible to merge counters or gauges if the metric name was duplicated, but it was possible when the metric name was duplicated across types. For example, the target collection (the one that is mutated) contains a counter with a name that is being used in the source collection (the one we get metrics from) for a gauge metric. --- packages/metrics/src/metric_collection.rs | 82 +++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 122895478..c7dfbba7a 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -50,11 +50,33 @@ impl MetricCollection { /// /// Returns an error if a metric name already exists in the current collection. pub fn merge(&mut self, other: &Self) -> Result<(), Error> { + self.check_cross_type_collision(other)?; self.counters.merge(&other.counters)?; self.gauges.merge(&other.gauges)?; Ok(()) } + /// Returns a set of all metric names in this collection. + fn collect_names(&self) -> HashSet { + self.counters.names().chain(self.gauges.names()).cloned().collect() + } + + /// Checks for name collisions between this collection and another one.
+ fn check_cross_type_collision(&self, other: &Self) -> Result<(), Error> { + let self_names: HashSet<_> = self.collect_names(); + let other_names: HashSet<_> = other.collect_names(); + + let cross_type_collisions = self_names.intersection(&other_names).next(); + + if let Some(name) = cross_type_collisions { + return Err(Error::MetricNameCollisionInMerge { + metric_name: (*name).clone(), + }); + } + + Ok(()) + } + // Counter-specific methods pub fn describe_counter(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option) { @@ -774,6 +796,66 @@ mod tests { assert_eq!(prometheus_output, ""); } + #[test] + fn it_should_allow_merging_metric_collections() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection1 = MetricCollection::default(); + collection1 + .increase_counter(&metric_name!("test_counter"), &label_set, time) + .unwrap(); + + let mut collection2 = MetricCollection::default(); + collection2 + .set_gauge(&metric_name!("test_gauge"), &label_set, 1.0, time) + .unwrap(); + + collection1.merge(&collection2).unwrap(); + + assert!(collection1.contains_counter(&metric_name!("test_counter"))); + assert!(collection1.contains_gauge(&metric_name!("test_gauge"))); + } + + #[test] + fn it_should_not_allow_merging_metric_collections_with_name_collisions_for_the_same_metric_types() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection1 = MetricCollection::default(); + collection1 + .increase_counter(&metric_name!("test_metric"), &label_set, time) + .unwrap(); + + let mut collection2 = MetricCollection::default(); + collection2 + .increase_counter(&metric_name!("test_metric"), &label_set, time) + .unwrap(); + let result = collection1.merge(&collection2); + + assert!(result.is_err()); + } + + #[test] + fn 
it_should_not_allow_merging_metric_collections_with_name_collisions_for_different_metric_types() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection1 = MetricCollection::default(); + collection1 + .increase_counter(&metric_name!("test_metric"), &label_set, time) + .unwrap(); + + let mut collection2 = MetricCollection::default(); + collection2 + .set_gauge(&metric_name!("test_metric"), &label_set, 1.0, time) + .unwrap(); + + let result = collection1.merge(&collection2); + + assert!(result.is_err()); + } + mod for_counters { use pretty_assertions::assert_eq; From bb2392dda0f2f7339544a3227a2d1adca008f156 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 17:16:48 +0100 Subject: [PATCH 142/247] test: [#1514] add tests to metrics package --- packages/metrics/src/metric_collection.rs | 233 ++++++++++++++++++---- 1 file changed, 194 insertions(+), 39 deletions(-) diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index c7dfbba7a..c53d02bcf 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -374,17 +374,18 @@ impl MetricKindCollection { /// /// Returns an error if a metric name already exists in the current collection. 
pub fn merge(&mut self, other: &Self) -> Result<(), Error> { - // Check for name collisions - for metric_name in other.metrics.keys() { - if self.metrics.contains_key(metric_name) { - return Err(Error::MetricNameCollisionInMerge { - metric_name: metric_name.clone(), - }); - } - } + self.check_for_name_collision(other)?; for (metric_name, metric) in &other.metrics { - if self.metrics.insert(metric_name.clone(), metric.clone()).is_some() { + self.metrics.insert(metric_name.clone(), metric.clone()); + } + + Ok(()) + } + + fn check_for_name_collision(&self, other: &Self) -> Result<(), Error> { + for metric_name in other.metrics.keys() { + if self.metrics.contains_key(metric_name) { return Err(Error::MetricNameCollisionInMerge { metric_name: metric_name.clone(), }); @@ -856,6 +857,38 @@ mod tests { assert!(result.is_err()); } + fn collection_with_one_counter(metric_name: &MetricName, label_set: &LabelSet, counter: Counter) -> MetricCollection { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + MetricCollection::new( + MetricKindCollection::new(vec![Metric::new( + metric_name.clone(), + None, + None, + SampleCollection::new(vec![Sample::new(counter, time, label_set.clone())]).unwrap(), + )]) + .unwrap(), + MetricKindCollection::default(), + ) + .unwrap() + } + + fn collection_with_one_gauge(metric_name: &MetricName, label_set: &LabelSet, gauge: Gauge) -> MetricCollection { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + MetricCollection::new( + MetricKindCollection::default(), + MetricKindCollection::new(vec![Metric::new( + metric_name.clone(), + None, + None, + SampleCollection::new(vec![Sample::new(gauge, time, label_set.clone())]).unwrap(), + )]) + .unwrap(), + ) + .unwrap() + } + mod for_counters { use pretty_assertions::assert_eq; @@ -866,32 +899,54 @@ mod tests { use crate::sample_collection::SampleCollection; #[test] - fn it_should_increase_a_preexistent_counter() { + fn it_should_allow_setting_to_an_absolute_value() { let time 
= DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_counter"); let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); - let mut metric_collection = MetricCollection::new( - MetricKindCollection::new(vec![Metric::new( - metric_name!("test_counter"), - None, - None, - SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(), - )]) - .unwrap(), - MetricKindCollection::default(), - ) - .unwrap(); + let mut collection = collection_with_one_counter(&metric_name, &label_set, Counter::new(0)); - metric_collection - .increase_counter(&metric_name!("test_counter"), &label_set, time) + collection + .set_counter(&metric_name!("test_counter"), &label_set, 1, time) .unwrap(); - metric_collection + + assert_eq!( + collection.get_counter_value(&metric_name!("test_counter"), &label_set), + Some(Counter::new(1)) + ); + } + + #[test] + fn it_should_fail_setting_to_an_absolute_value_if_a_gauge_with_the_same_name_exists() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_counter"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_gauge(&metric_name, &label_set, Gauge::new(0.0)); + + let result = collection.set_counter(&metric_name!("test_counter"), &label_set, 1, time); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionAdding { metric_name }) if metric_name == metric_name!("test_counter")) + ); + } + + #[test] + fn it_should_increase_a_preexistent_counter() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_counter"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_counter(&metric_name, &label_set, Counter::new(0)); + + collection 
.increase_counter(&metric_name!("test_counter"), &label_set, time) .unwrap(); assert_eq!( - metric_collection.get_counter_value(&metric_name!("test_counter"), &label_set), - Some(Counter::new(2)) + collection.get_counter_value(&metric_name!("test_counter"), &label_set), + Some(Counter::new(1)) ); } @@ -962,30 +1017,89 @@ mod tests { #[test] fn it_should_set_a_preexistent_gauge() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); - let mut metric_collection = MetricCollection::new( - MetricKindCollection::default(), - MetricKindCollection::new(vec![Metric::new( - metric_name!("test_gauge"), - None, - None, - SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(), - )]) - .unwrap(), - ) - .unwrap(); + let mut collection = collection_with_one_gauge(&metric_name, &label_set, Gauge::new(0.0)); - metric_collection + collection .set_gauge(&metric_name!("test_gauge"), &label_set, 1.0, time) .unwrap(); assert_eq!( - metric_collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), + collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), Some(Gauge::new(1.0)) ); } + #[test] + fn it_should_allow_incrementing_a_gauge() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_gauge(&metric_name, &label_set, Gauge::new(0.0)); + + collection + .increment_gauge(&metric_name!("test_gauge"), &label_set, time) + .unwrap(); + + assert_eq!( + collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), + Some(Gauge::new(1.0)) + ); + } + + #[test] + fn it_should_fail_incrementing_a_gauge_if_it_exists_a_counter_with_the_same_name() { + let time = 
DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_counter(&metric_name, &label_set, Counter::new(0)); + + let result = collection.increment_gauge(&metric_name!("test_gauge"), &label_set, time); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionAdding { metric_name }) if metric_name == metric_name!("test_gauge")) + ); + } + + #[test] + fn it_should_allow_decrementing_a_gauge() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_gauge(&metric_name, &label_set, Gauge::new(1.0)); + + collection + .decrement_gauge(&metric_name!("test_gauge"), &label_set, time) + .unwrap(); + + assert_eq!( + collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), + Some(Gauge::new(0.0)) + ); + } + + #[test] + fn it_should_fail_decrementing_a_gauge_if_it_exists_a_counter_with_the_same_name() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_counter(&metric_name, &label_set, Counter::new(0)); + + let result = collection.decrement_gauge(&metric_name!("test_gauge"), &label_set, time); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionAdding { metric_name }) if metric_name == metric_name!("test_gauge")) + ); + } + #[test] fn it_should_automatically_create_a_gauge_when_setting_if_it_does_not_exist() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); @@ -1037,4 +1151,45 @@ mod tests { assert!(result.is_err()); } } + + mod 
metric_kind_collection { + + use crate::counter::Counter; + use crate::gauge::Gauge; + use crate::metric::Metric; + use crate::metric_collection::{Error, MetricKindCollection}; + use crate::metric_name; + + #[test] + fn it_should_not_allow_merging_counter_metric_collections_with_name_collisions() { + let mut collection1 = MetricKindCollection::::default(); + collection1.insert(Metric::::new_empty_with_name(metric_name!("test_metric"))); + + let mut collection2 = MetricKindCollection::::default(); + collection2.insert(Metric::::new_empty_with_name(metric_name!("test_metric"))); + + let result = collection1.merge(&collection2); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionInMerge { metric_name }) if metric_name == metric_name!("test_metric")) + ); + } + + #[test] + fn it_should_not_allow_merging_gauge_metric_collections_with_name_collisions() { + let mut collection1 = MetricKindCollection::::default(); + collection1.insert(Metric::::new_empty_with_name(metric_name!("test_metric"))); + + let mut collection2 = MetricKindCollection::::default(); + collection2.insert(Metric::::new_empty_with_name(metric_name!("test_metric"))); + + let result = collection1.merge(&collection2); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionInMerge { metric_name }) if metric_name == metric_name!("test_metric")) + ); + } + } } From 45bc807c366ee1dd4522ac9cfdb00f45d8eeb606 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jun 2025 12:59:55 +0100 Subject: [PATCH 143/247] refactor: [#1534] rename TORRENT_REPOSITORY_LOG_TARGET to SWARM_COORDINATION_REGISTRY_LOG_TARGET --- packages/swarm-coordination-registry/src/lib.rs | 2 +- .../src/statistics/event/listener.rs | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index fc7996817..eb2721a0c 100644 --- 
a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -22,7 +22,7 @@ pub(crate) type CurrentClock = clock::Working; #[allow(dead_code)] pub(crate) type CurrentClock = clock::Stopped; -pub const TORRENT_REPOSITORY_LOG_TARGET: &str = "TORRENT_REPOSITORY"; +pub const SWARM_COORDINATION_REGISTRY_LOG_TARGET: &str = "SWARM_COORDINATION_REGISTRY"; #[cfg(test)] pub(crate) mod tests { diff --git a/packages/swarm-coordination-registry/src/statistics/event/listener.rs b/packages/swarm-coordination-registry/src/statistics/event/listener.rs index f3b534332..9ff707818 100644 --- a/packages/swarm-coordination-registry/src/statistics/event/listener.rs +++ b/packages/swarm-coordination-registry/src/statistics/event/listener.rs @@ -7,18 +7,18 @@ use torrust_tracker_events::receiver::RecvError; use super::handler::handle_event; use crate::event::receiver::Receiver; use crate::statistics::repository::Repository; -use crate::{CurrentClock, TORRENT_REPOSITORY_LOG_TARGET}; +use crate::{CurrentClock, SWARM_COORDINATION_REGISTRY_LOG_TARGET}; #[must_use] pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { let stats_repository = repository.clone(); - tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Starting torrent repository event listener"); + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Starting torrent repository event listener"); tokio::spawn(async move { dispatch_events(receiver, stats_repository).await; - tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository listener finished"); + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Torrent repository listener finished"); }) } @@ -32,7 +32,7 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { - tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Received Ctrl+C, shutting down torrent repository event listener."); + tracing::info!(target: 
SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Received Ctrl+C, shutting down torrent repository event listener."); break; } @@ -42,11 +42,11 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { match e { RecvError::Closed => { - tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository event receiver closed."); + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Torrent repository event receiver closed."); break; } RecvError::Lagged(n) => { - tracing::warn!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository event receiver lagged by {} events.", n); + tracing::warn!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Torrent repository event receiver lagged by {} events.", n); } } } From c67f27a7f071708cf4739fd7ae3cbff3e946464f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jun 2025 13:05:03 +0100 Subject: [PATCH 144/247] refactor: [#1534] Rename torrent_repository_ prefix to swarm_coordination_registry_ --- .../statistics/activity_metrics_updater.rs | 6 +- .../src/statistics/event/handler.rs | 88 ++++++++++++------- .../src/statistics/mod.rs | 44 +++++----- 3 files changed, 83 insertions(+), 55 deletions(-) diff --git a/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs b/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs index 016e230ec..cf814e810 100644 --- a/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs +++ b/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs @@ -10,7 +10,7 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use tracing::instrument; use super::repository::Repository; -use crate::statistics::{TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL, TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL}; +use crate::statistics::{SWARM_COORDINATION_REGISTRY_PEERS_INACTIVE_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_INACTIVE_TOTAL}; use crate::{CurrentClock, Registry}; #[must_use] @@ -81,7 
+81,7 @@ async fn update_inactive_peers_total(stats_repository: &Arc, inactiv let _unused = stats_repository .set_gauge( - &metric_name!(TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_INACTIVE_TOTAL), &LabelSet::default(), inactive_peers_total, CurrentClock::now(), @@ -95,7 +95,7 @@ async fn update_inactive_torrents_total(stats_repository: &Arc, inac let _unused = stats_repository .set_gauge( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_INACTIVE_TOTAL), &LabelSet::default(), inactive_torrents_total, CurrentClock::now(), diff --git a/packages/swarm-coordination-registry/src/statistics/event/handler.rs b/packages/swarm-coordination-registry/src/statistics/event/handler.rs index f8d350a80..17b012086 100644 --- a/packages/swarm-coordination-registry/src/statistics/event/handler.rs +++ b/packages/swarm-coordination-registry/src/statistics/event/handler.rs @@ -8,11 +8,13 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; use crate::statistics::{ - TORRENT_REPOSITORY_PEERS_ADDED_TOTAL, TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL, TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL, - TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL, TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL, - TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL, TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, + SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL, + SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL, SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL, }; +#[allow(clippy::too_many_lines)] pub async fn handle_event(event: Event, stats_repository: &Arc, now: 
DurationSinceUnixEpoch) { match event { // Torrent events @@ -20,12 +22,16 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: tracing::debug!(info_hash = ?info_hash, "Torrent added",); let _unused = stats_repository - .increment_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) + .increment_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL), + &LabelSet::default(), + now, + ) .await; let _unused = stats_repository .increment_counter( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL), &LabelSet::default(), now, ) @@ -35,12 +41,16 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: tracing::debug!(info_hash = ?info_hash, "Torrent removed",); let _unused = stats_repository - .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) + .decrement_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL), + &LabelSet::default(), + now, + ) .await; let _unused = stats_repository .increment_counter( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL), &LabelSet::default(), now, ) @@ -54,11 +64,15 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: let label_set = label_set_for_peer(&peer); let _unused = stats_repository - .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), &label_set, now) + .increment_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), + &label_set, + now, + ) .await; let _unused = stats_repository - .increment_counter(&metric_name!(TORRENT_REPOSITORY_PEERS_ADDED_TOTAL), &label_set, now) + .increment_counter(&metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL), &label_set, now) .await; } Event::PeerRemoved { info_hash, peer } => { @@ -67,11 +81,19 @@ pub async fn handle_event(event: Event, 
stats_repository: &Arc, now: let label_set = label_set_for_peer(&peer); let _unused = stats_repository - .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), &label_set, now) + .decrement_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), + &label_set, + now, + ) .await; let _unused = stats_repository - .increment_counter(&metric_name!(TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL), &label_set, now) + .increment_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL), + &label_set, + now, + ) .await; } Event::PeerUpdated { @@ -84,7 +106,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: if old_peer.role() != new_peer.role() { let _unused = stats_repository .increment_gauge( - &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), &label_set_for_peer(&new_peer), now, ) @@ -92,7 +114,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: let _unused = stats_repository .decrement_gauge( - &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), &label_set_for_peer(&old_peer), now, ) @@ -102,7 +124,11 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: let label_set = label_set_for_peer(&new_peer); let _unused = stats_repository - .increment_counter(&metric_name!(TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL), &label_set, now) + .increment_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL), + &label_set, + now, + ) .await; } Event::PeerDownloadCompleted { info_hash, peer } => { @@ -110,7 +136,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: let _unused = stats_repository .increment_counter( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL), &label_set_for_peer(&peer), now, ) @@ -217,7 +243,8 
@@ mod tests { use crate::statistics::event::handler::tests::{expect_counter_metric_to_be, expect_gauge_metric_to_be}; use crate::statistics::repository::Repository; use crate::statistics::{ - TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL, TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL, }; use crate::tests::{sample_info_hash, sample_peer}; use crate::CurrentClock; @@ -240,7 +267,7 @@ mod tests { expect_gauge_metric_to_be( &stats_repository, - &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL), &LabelSet::default(), 1.0, ) @@ -252,7 +279,7 @@ mod tests { clock::Stopped::local_set_to_unix_epoch(); let stats_repository = Arc::new(Repository::new()); - let metric_name = metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL); + let metric_name = metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL); let label_set = LabelSet::default(); // Increment the gauge first to simulate a torrent being added. 
@@ -291,7 +318,7 @@ mod tests { expect_counter_metric_to_be( &stats_repository, - &metric_name!(TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL), &LabelSet::default(), 1, ) @@ -315,7 +342,7 @@ mod tests { expect_counter_metric_to_be( &stats_repository, - &metric_name!(TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL), &LabelSet::default(), 1, ) @@ -335,7 +362,8 @@ mod tests { use crate::statistics::event::handler::{handle_event, label_set_for_peer}; use crate::statistics::repository::Repository; use crate::statistics::{ - TORRENT_REPOSITORY_PEERS_ADDED_TOTAL, TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL, TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL, + SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL, + SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL, }; use crate::tests::{sample_info_hash, sample_peer}; use crate::CurrentClock; @@ -357,7 +385,7 @@ mod tests { expect_gauge_metric_to_be, get_gauge_metric, make_opposite_role_peer, make_peer, }; use crate::statistics::repository::Repository; - use crate::statistics::TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL; + use crate::statistics::SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL; use crate::tests::sample_info_hash; use crate::CurrentClock; @@ -373,7 +401,7 @@ mod tests { let peer = make_peer(role); let stats_repository = Arc::new(Repository::new()); - let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let metric_name = metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL); let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); handle_event( @@ -402,7 +430,7 @@ mod tests { let stats_repository = Arc::new(Repository::new()); - let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let metric_name = metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL); let 
label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); // Increment the gauge first to simulate a peer being added. @@ -438,7 +466,7 @@ mod tests { let old_peer = make_peer(old_role); let new_peer = make_opposite_role_peer(&old_peer); - let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let metric_name = metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL); let old_role_label_set = (label_name!("peer_role"), LabelValue::new(&old_peer.role().to_string())).into(); let new_role_label_set = (label_name!("peer_role"), LabelValue::new(&new_peer.role().to_string())).into(); @@ -497,7 +525,7 @@ mod tests { expect_counter_metric_to_be( &stats_repository, - &metric_name!(TORRENT_REPOSITORY_PEERS_ADDED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL), &label_set_for_peer(&peer), 1, ) @@ -524,7 +552,7 @@ mod tests { expect_counter_metric_to_be( &stats_repository, - &metric_name!(TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL), &label_set_for_peer(&peer), 1, ) @@ -552,7 +580,7 @@ mod tests { expect_counter_metric_to_be( &stats_repository, - &metric_name!(TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL), &label_set_for_peer(&new_peer), 1, ) @@ -574,7 +602,7 @@ mod tests { use crate::statistics::event::handler::handle_event; use crate::statistics::event::handler::tests::{expect_counter_metric_to_be, make_peer}; use crate::statistics::repository::Repository; - use crate::statistics::TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL; + use crate::statistics::SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL; use crate::tests::sample_info_hash; use crate::CurrentClock; @@ -590,7 +618,7 @@ mod tests { let peer = make_peer(role); let stats_repository = Arc::new(Repository::new()); - let metric_name = metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL); + let metric_name = 
metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL); let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); handle_event( diff --git a/packages/swarm-coordination-registry/src/statistics/mod.rs b/packages/swarm-coordination-registry/src/statistics/mod.rs index 6505a2db2..5b9b7f376 100644 --- a/packages/swarm-coordination-registry/src/statistics/mod.rs +++ b/packages/swarm-coordination-registry/src/statistics/mod.rs @@ -10,22 +10,22 @@ use torrust_tracker_metrics::unit::Unit; // Torrent metrics -const TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL: &str = "torrent_repository_torrents_added_total"; -const TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL: &str = "torrent_repository_torrents_removed_total"; +const SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL: &str = "swarm_coordination_registry_torrents_added_total"; +const SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL: &str = "swarm_coordination_registry_torrents_removed_total"; -const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; -const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; -const TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL: &str = "torrent_repository_torrents_inactive_total"; +const SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL: &str = "swarm_coordination_registry_torrents_total"; +const SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL: &str = "swarm_coordination_registry_torrents_downloads_total"; +const SWARM_COORDINATION_REGISTRY_TORRENTS_INACTIVE_TOTAL: &str = "swarm_coordination_registry_torrents_inactive_total"; // Peers metrics -const TORRENT_REPOSITORY_PEERS_ADDED_TOTAL: &str = "torrent_repository_peers_added_total"; -const TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL: &str = "torrent_repository_peers_removed_total"; -const TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL: &str = "torrent_repository_peers_updated_total"; +const SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL: &str = 
"swarm_coordination_registry_peers_added_total"; +const SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL: &str = "swarm_coordination_registry_peers_removed_total"; +const SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL: &str = "swarm_coordination_registry_peers_updated_total"; -const TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL: &str = "torrent_repository_peer_connections_total"; -const TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL: &str = "torrent_repository_unique_peers_total"; // todo: not implemented yet -const TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL: &str = "torrent_repository_peers_inactive_total"; +const SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL: &str = "swarm_coordination_registry_peer_connections_total"; +const SWARM_COORDINATION_REGISTRY_UNIQUE_PEERS_TOTAL: &str = "swarm_coordination_registry_unique_peers_total"; // todo: not implemented yet +const SWARM_COORDINATION_REGISTRY_PEERS_INACTIVE_TOTAL: &str = "swarm_coordination_registry_peers_inactive_total"; #[must_use] pub fn describe_metrics() -> Metrics { @@ -34,31 +34,31 @@ pub fn describe_metrics() -> Metrics { // Torrent metrics metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of torrents added.")), ); metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of torrents removed.")), ); metrics.metric_collection.describe_gauge( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of torrents.")), ); metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), + 
&metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of torrent downloads.")), ); metrics.metric_collection.describe_gauge( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_INACTIVE_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of inactive torrents.")), ); @@ -66,25 +66,25 @@ pub fn describe_metrics() -> Metrics { // Peers metrics metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_PEERS_ADDED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of peers added.")), ); metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of peers removed.")), ); metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of peers updated.")), ); metrics.metric_collection.describe_gauge( - &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), Some(Unit::Count), Some(MetricDescription::new( "The total number of peer connections (one connection per torrent).", @@ -92,13 +92,13 @@ pub fn describe_metrics() -> Metrics { ); metrics.metric_collection.describe_gauge( - &metric_name!(TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_UNIQUE_PEERS_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of unique peers.")), ); metrics.metric_collection.describe_gauge( - &metric_name!(TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL), + 
&metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_INACTIVE_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of inactive peers.")), ); From c26315aea7c837ff0c523b5979600aa8f00d93bf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jun 2025 13:06:36 +0100 Subject: [PATCH 145/247] refactor: [#1534] Rename TorrentRepositoryContainer type to SwarmCoordinationRegistryContainer --- packages/axum-http-tracker-server/src/environment.rs | 4 ++-- packages/axum-http-tracker-server/src/server.rs | 4 ++-- packages/axum-rest-tracker-api-server/src/environment.rs | 4 ++-- packages/http-tracker-core/src/container.rs | 4 ++-- packages/rest-tracker-api-core/src/container.rs | 8 ++++---- packages/rest-tracker-api-core/src/statistics/services.rs | 4 ++-- packages/swarm-coordination-registry/src/container.rs | 4 ++-- packages/tracker-core/src/container.rs | 7 +++++-- packages/tracker-core/tests/common/test_env.rs | 6 +++--- packages/udp-tracker-core/src/container.rs | 4 ++-- packages/udp-tracker-server/src/environment.rs | 4 ++-- src/container.rs | 6 +++--- 12 files changed, 31 insertions(+), 28 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 54c6b7767..ccc54b9cc 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -10,7 +10,7 @@ use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use crate::server::{HttpServer, Launcher, Running, Stopped}; @@ -144,7 +144,7 @@ impl EnvContainer { .expect("missing HTTP tracker configuration"); let http_tracker_config = 
Arc::new(http_tracker_config[0].clone()); - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( configuration.core.tracker_usage_statistics.into(), )); diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index b8ece8086..99ba4be51 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -259,7 +259,7 @@ mod tests { use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; - use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; + use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::server::{HttpServer, Launcher}; @@ -290,7 +290,7 @@ mod tests { let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( configuration.core.tracker_usage_statistics.into(), )); diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index 6be4cc53c..fc6ee112e 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -12,7 +12,7 @@ use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use 
torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use crate::server::{ApiServer, Launcher, Running, Stopped}; @@ -173,7 +173,7 @@ impl EnvContainer { .clone(), ); - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 35f75e1fe..f573740a7 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_tracker_configuration::{Core, HttpTracker}; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; @@ -27,7 +27,7 @@ pub struct HttpTrackerCoreContainer { impl HttpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, http_tracker_config: &Arc) -> Arc { - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index f76c2ece3..238e76801 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -7,14 +7,14 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, 
HttpApi, HttpTracker, UdpTracker}; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; pub struct TrackerHttpApiCoreContainer { pub http_api_config: Arc, // Torrent repository - pub torrent_repository_container: Arc, + pub torrent_repository_container: Arc, // Tracker core pub tracker_core_container: Arc, @@ -36,7 +36,7 @@ impl TrackerHttpApiCoreContainer { udp_tracker_config: &Arc, http_api_config: &Arc, ) -> Arc { - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); @@ -65,7 +65,7 @@ impl TrackerHttpApiCoreContainer { #[must_use] pub fn initialize_from( - torrent_repository_container: &Arc, + torrent_repository_container: &Arc, tracker_core_container: &Arc, http_tracker_core_container: &Arc, udp_tracker_core_container: &Arc, diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 56536a02f..1467517d9 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -165,7 +165,7 @@ mod tests { use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; use torrust_tracker_events::bus::SenderStatus; - use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; + use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_tracker_test_helpers::configuration; use crate::statistics::metrics::{ProtocolMetrics, TorrentsMetrics}; @@ -180,7 +180,7 @@ mod tests { let config = tracker_configuration(); let core_config = Arc::new(config.core.clone()); - let 
torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize(SenderStatus::Enabled)); + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize(SenderStatus::Enabled)); let tracker_core_container = TrackerCoreContainer::initialize_from(&core_config, &torrent_repository_container.clone()); diff --git a/packages/swarm-coordination-registry/src/container.rs b/packages/swarm-coordination-registry/src/container.rs index 1b56b3d4b..1a243f967 100644 --- a/packages/swarm-coordination-registry/src/container.rs +++ b/packages/swarm-coordination-registry/src/container.rs @@ -8,14 +8,14 @@ use crate::event::{self}; use crate::statistics::repository::Repository; use crate::{statistics, Registry}; -pub struct TorrentRepositoryContainer { +pub struct SwarmCoordinationRegistryContainer { pub swarms: Arc, pub event_bus: Arc, pub stats_event_sender: event::sender::Sender, pub stats_repository: Arc, } -impl TorrentRepositoryContainer { +impl SwarmCoordinationRegistryContainer { #[must_use] pub fn initialize(sender_status: SenderStatus) -> Self { // Torrent repository stats diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 949761553..8d776a3e6 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use torrust_tracker_configuration::Core; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use crate::announce_handler::AnnounceHandler; use crate::authentication::handler::KeysHandler; @@ -38,7 +38,10 @@ pub struct TrackerCoreContainer { impl TrackerCoreContainer { #[must_use] - pub fn initialize_from(core_config: &Arc, torrent_repository_container: &Arc) -> Self { + pub fn initialize_from( + core_config: &Arc, + torrent_repository_container: &Arc, + ) -> Self { let database = 
initialize_database(core_config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 64bdcaad8..0c1ea8524 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -14,10 +14,10 @@ use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; pub struct TestEnv { - pub torrent_repository_container: Arc, + pub torrent_repository_container: Arc, pub tracker_core_container: Arc, } @@ -33,7 +33,7 @@ impl TestEnv { pub fn new(core_config: Core) -> Self { let core_config = Arc::new(core_config); - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index c4be395fc..a6e45268f 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, UdpTracker}; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use 
crate::event::bus::EventBus; use crate::event::sender::Broadcaster; @@ -32,7 +32,7 @@ pub struct UdpTrackerCoreContainer { impl UdpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, udp_tracker_config: &Arc) -> Arc { - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 268259f1b..d12a1b011 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -8,7 +8,7 @@ use tokio::task::JoinHandle; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_primitives::peer; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use crate::container::UdpTrackerServerContainer; use crate::server::spawner::Spawner; @@ -175,7 +175,7 @@ impl EnvContainer { let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); diff --git a/src/container.rs b/src/container.rs index bb5873fb2..0f73bda6b 100644 --- a/src/container.rs +++ b/src/container.rs @@ -9,7 +9,7 @@ use bittorrent_udp_tracker_core::{self}; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; use 
torrust_tracker_configuration::{Configuration, HttpApi}; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; @@ -30,7 +30,7 @@ pub struct AppContainer { pub registar: Arc, // Torrent Repository - pub torrent_repository_container: Arc, + pub torrent_repository_container: Arc, // Core pub tracker_core_container: Arc, @@ -60,7 +60,7 @@ impl AppContainer { // Torrent Repository - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); From b09e79c5983952ece3f94e6c689f62737bf1fd86 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jun 2025 13:11:56 +0100 Subject: [PATCH 146/247] refactor: [#1534] Rename torrent_repository_container to swarm_coordination_registry_container --- .../src/environment.rs | 4 ++-- .../axum-http-tracker-server/src/server.rs | 4 ++-- .../src/environment.rs | 6 ++--- .../src/v1/context/stats/routes.rs | 5 ++++- packages/http-tracker-core/src/container.rs | 4 ++-- .../rest-tracker-api-core/src/container.rs | 12 +++++----- .../src/statistics/services.rs | 6 +++-- packages/tracker-core/src/container.rs | 6 +++-- .../tracker-core/tests/common/test_env.rs | 22 +++++++++++-------- packages/udp-tracker-core/src/container.rs | 4 ++-- .../udp-tracker-server/src/environment.rs | 4 ++-- .../jobs/activity_metrics_updater.rs | 4 ++-- src/bootstrap/jobs/torrent_repository.rs | 4 ++-- src/bootstrap/jobs/tracker_core.rs | 2 +- src/container.rs | 10 ++++----- 15 files changed, 54 insertions(+), 43 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index ccc54b9cc..6e58c2cac 100644 --- 
a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -144,13 +144,13 @@ impl EnvContainer { .expect("missing HTTP tracker configuration"); let http_tracker_config = Arc::new(http_tracker_config[0].clone()); - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( configuration.core.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); let http_tracker_container = diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 99ba4be51..1775a3d72 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -290,13 +290,13 @@ mod tests { let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( configuration.core.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); let announce_service = Arc::new(AnnounceService::new( diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index fc6ee112e..cddb45277 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -173,13 +173,13 @@ impl EnvContainer { .clone(), ); - let torrent_repository_container = 
Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); let http_tracker_core_container = @@ -191,7 +191,7 @@ impl EnvContainer { let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); let tracker_http_api_core_container = TrackerHttpApiCoreContainer::initialize_from( - &torrent_repository_container, + &swarm_coordination_registry_container, &tracker_core_container, &http_tracker_core_container, &udp_tracker_core_container, diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index a573b764a..c2a1466e0 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -30,7 +30,10 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc, http_tracker_config: &Arc) -> Arc { - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); Self::initialize_from_tracker_core(&tracker_core_container, http_tracker_config) diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index 238e76801..93655b2ba 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ 
-14,7 +14,7 @@ pub struct TrackerHttpApiCoreContainer { pub http_api_config: Arc, // Torrent repository - pub torrent_repository_container: Arc, + pub swarm_coordination_registry_container: Arc, // Tracker core pub tracker_core_container: Arc, @@ -36,13 +36,13 @@ impl TrackerHttpApiCoreContainer { udp_tracker_config: &Arc, http_api_config: &Arc, ) -> Arc { - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); let http_tracker_core_container = @@ -54,7 +54,7 @@ impl TrackerHttpApiCoreContainer { let udp_tracker_server_container = UdpTrackerServerContainer::initialize(core_config); Self::initialize_from( - &torrent_repository_container, + &swarm_coordination_registry_container, &tracker_core_container, &http_tracker_core_container, &udp_tracker_core_container, @@ -65,7 +65,7 @@ impl TrackerHttpApiCoreContainer { #[must_use] pub fn initialize_from( - torrent_repository_container: &Arc, + swarm_coordination_registry_container: &Arc, tracker_core_container: &Arc, http_tracker_core_container: &Arc, udp_tracker_core_container: &Arc, @@ -76,7 +76,7 @@ impl TrackerHttpApiCoreContainer { http_api_config: http_api_config.clone(), // Torrent repository - torrent_repository_container: torrent_repository_container.clone(), + swarm_coordination_registry_container: swarm_coordination_registry_container.clone(), // Tracker core tracker_core_container: tracker_core_container.clone(), diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 1467517d9..6474df0d7 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ 
b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -180,9 +180,11 @@ mod tests { let config = tracker_configuration(); let core_config = Arc::new(config.core.clone()); - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize(SenderStatus::Enabled)); + let swarm_coordination_registry_container = + Arc::new(SwarmCoordinationRegistryContainer::initialize(SenderStatus::Enabled)); - let tracker_core_container = TrackerCoreContainer::initialize_from(&core_config, &torrent_repository_container.clone()); + let tracker_core_container = + TrackerCoreContainer::initialize_from(&core_config, &swarm_coordination_registry_container.clone()); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 8d776a3e6..93b8efd7e 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -40,7 +40,7 @@ impl TrackerCoreContainer { #[must_use] pub fn initialize_from( core_config: &Arc, - torrent_repository_container: &Arc, + swarm_coordination_registry_container: &Arc, ) -> Self { let database = initialize_database(core_config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -53,7 +53,9 @@ impl TrackerCoreContainer { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(torrent_repository_container.swarms.clone())); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new( + swarm_coordination_registry_container.swarms.clone(), + )); let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 0c1ea8524..d3bc9652a 100644 --- 
a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -17,7 +17,7 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; pub struct TestEnv { - pub torrent_repository_container: Arc, + pub swarm_coordination_registry_container: Arc, pub tracker_core_container: Arc, } @@ -33,17 +33,17 @@ impl TestEnv { pub fn new(core_config: Core) -> Self { let core_config = Arc::new(core_config); - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); Self { - torrent_repository_container, + swarm_coordination_registry_container, tracker_core_container, } } @@ -68,13 +68,13 @@ impl TestEnv { let mut jobs = vec![]; let job = torrust_tracker_swarm_coordination_registry::statistics::event::listener::run_event_listener( - self.torrent_repository_container.event_bus.receiver(), - &self.torrent_repository_container.stats_repository, + self.swarm_coordination_registry_container.event_bus.receiver(), + &self.swarm_coordination_registry_container.stats_repository, ); jobs.push(job); let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( - self.torrent_repository_container.event_bus.receiver(), + self.swarm_coordination_registry_container.event_bus.receiver(), &self.tracker_core_container.stats_repository, &self.tracker_core_container.db_downloads_metric_repository, self.tracker_core_container @@ -147,7 +147,7 @@ impl TestEnv { } pub async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.torrent_repository_container + 
self.swarm_coordination_registry_container .swarms .get_swarm_metadata(info_hash) .await @@ -155,7 +155,11 @@ impl TestEnv { } pub async fn remove_swarm(&self, info_hash: &InfoHash) { - self.torrent_repository_container.swarms.remove(info_hash).await.unwrap(); + self.swarm_coordination_registry_container + .swarms + .remove(info_hash) + .await + .unwrap(); } pub async fn get_counter_value(&self, metric_name: &str) -> u64 { diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index a6e45268f..1d8b1d71c 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -32,13 +32,13 @@ pub struct UdpTrackerCoreContainer { impl UdpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, udp_tracker_config: &Arc) -> Arc { - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); Self::initialize_from_tracker_core(&tracker_core_container, udp_tracker_config) diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index d12a1b011..f48b3a7c1 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -175,13 +175,13 @@ impl EnvContainer { let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = 
Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); let udp_tracker_core_container = diff --git a/src/bootstrap/jobs/activity_metrics_updater.rs b/src/bootstrap/jobs/activity_metrics_updater.rs index 9813fed65..9bbdc3f9b 100644 --- a/src/bootstrap/jobs/activity_metrics_updater.rs +++ b/src/bootstrap/jobs/activity_metrics_updater.rs @@ -12,8 +12,8 @@ use crate::CurrentClock; #[must_use] pub fn start_job(config: &Configuration, app_container: &Arc) -> JoinHandle<()> { torrust_tracker_swarm_coordination_registry::statistics::activity_metrics_updater::start_job( - &app_container.torrent_repository_container.swarms.clone(), - &app_container.torrent_repository_container.stats_repository.clone(), + &app_container.swarm_coordination_registry_container.swarms.clone(), + &app_container.swarm_coordination_registry_container.stats_repository.clone(), peer_inactivity_cutoff_timestamp(config.core.tracker_policy.max_peer_timeout), ) } diff --git a/src/bootstrap/jobs/torrent_repository.rs b/src/bootstrap/jobs/torrent_repository.rs index c64917ea6..44ffdf53b 100644 --- a/src/bootstrap/jobs/torrent_repository.rs +++ b/src/bootstrap/jobs/torrent_repository.rs @@ -8,8 +8,8 @@ use crate::container::AppContainer; pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { if config.core.tracker_usage_statistics { let job = torrust_tracker_swarm_coordination_registry::statistics::event::listener::run_event_listener( - app_container.torrent_repository_container.event_bus.receiver(), - &app_container.torrent_repository_container.stats_repository, + app_container.swarm_coordination_registry_container.event_bus.receiver(), + &app_container.swarm_coordination_registry_container.stats_repository, ); Some(job) diff --git 
a/src/bootstrap/jobs/tracker_core.rs b/src/bootstrap/jobs/tracker_core.rs index fd5cacbda..f2fc25ef3 100644 --- a/src/bootstrap/jobs/tracker_core.rs +++ b/src/bootstrap/jobs/tracker_core.rs @@ -8,7 +8,7 @@ use crate::container::AppContainer; pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { if config.core.tracker_usage_statistics || config.core.tracker_policy.persistent_torrent_completed_stat { let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( - app_container.torrent_repository_container.event_bus.receiver(), + app_container.swarm_coordination_registry_container.event_bus.receiver(), &app_container.tracker_core_container.stats_repository, &app_container.tracker_core_container.db_downloads_metric_repository, app_container diff --git a/src/container.rs b/src/container.rs index 0f73bda6b..461a5b36a 100644 --- a/src/container.rs +++ b/src/container.rs @@ -30,7 +30,7 @@ pub struct AppContainer { pub registar: Arc, // Torrent Repository - pub torrent_repository_container: Arc, + pub swarm_coordination_registry_container: Arc, // Core pub tracker_core_container: Arc, @@ -60,7 +60,7 @@ impl AppContainer { // Torrent Repository - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); @@ -68,7 +68,7 @@ impl AppContainer { let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); // HTTP @@ -98,7 +98,7 @@ impl AppContainer { registar, // Torrent Repository - torrent_repository_container, + swarm_coordination_registry_container, // Core tracker_core_container, @@ -146,7 +146,7 @@ impl AppContainer { TrackerHttpApiCoreContainer { http_api_config: http_api_config.clone(), - torrent_repository_container: 
self.torrent_repository_container.clone(), + swarm_coordination_registry_container: self.swarm_coordination_registry_container.clone(), tracker_core_container: self.tracker_core_container.clone(), From 8da42e4333d015ff5927da10807f7c67fa399ece Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jun 2025 13:13:48 +0100 Subject: [PATCH 147/247] refactor: [#1534] Rename torrent_repository_event_listener to swarm_coordination_registry_event_listener --- src/app.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/app.rs b/src/app.rs index ccc2e8bcb..5050c1dd1 100644 --- a/src/app.rs +++ b/src/app.rs @@ -71,7 +71,7 @@ async fn load_data_from_database(config: &Configuration, app_container: &Arc) -> JobManager { let mut job_manager = JobManager::new(); - start_torrent_repository_event_listener(config, app_container, &mut job_manager); + start_swarm_coordination_registry_event_listener(config, app_container, &mut job_manager); start_tracker_core_event_listener(config, app_container, &mut job_manager); start_http_core_event_listener(config, app_container, &mut job_manager); start_udp_core_event_listener(config, app_container, &mut job_manager); @@ -132,13 +132,13 @@ async fn load_torrent_metrics(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager, ) { job_manager.push_opt( - "torrent_repository_event_listener", + "swarm_coordination_registry_event_listener", jobs::torrent_repository::start_event_listener(config, app_container), ); } From b2feb7b3150f0314cace37b7f08a926a2eb63298 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jun 2025 13:15:40 +0100 Subject: [PATCH 148/247] docs: [#1534] Update comments after rename --- packages/rest-tracker-api-core/src/container.rs | 4 ++-- packages/swarm-coordination-registry/src/container.rs | 2 +- packages/tracker-core/src/statistics/persisted/downloads.rs | 2 +- src/container.rs | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git 
a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index 93655b2ba..bcc5a0186 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -13,7 +13,7 @@ use torrust_udp_tracker_server::container::UdpTrackerServerContainer; pub struct TrackerHttpApiCoreContainer { pub http_api_config: Arc, - // Torrent repository + // Swarm Coordination Registry Container pub swarm_coordination_registry_container: Arc, // Tracker core @@ -75,7 +75,7 @@ impl TrackerHttpApiCoreContainer { Arc::new(TrackerHttpApiCoreContainer { http_api_config: http_api_config.clone(), - // Torrent repository + // Swarm Coordination Registry Container swarm_coordination_registry_container: swarm_coordination_registry_container.clone(), // Tracker core diff --git a/packages/swarm-coordination-registry/src/container.rs b/packages/swarm-coordination-registry/src/container.rs index 1a243f967..718e3ee52 100644 --- a/packages/swarm-coordination-registry/src/container.rs +++ b/packages/swarm-coordination-registry/src/container.rs @@ -18,7 +18,7 @@ pub struct SwarmCoordinationRegistryContainer { impl SwarmCoordinationRegistryContainer { #[must_use] pub fn initialize(sender_status: SenderStatus) -> Self { - // Torrent repository stats + // Swarm Coordination Registry Container stats let broadcaster = Broadcaster::default(); let stats_repository = Arc::new(Repository::new()); diff --git a/packages/tracker-core/src/statistics/persisted/downloads.rs b/packages/tracker-core/src/statistics/persisted/downloads.rs index 4d3bdf9a3..6248bdc73 100644 --- a/packages/tracker-core/src/statistics/persisted/downloads.rs +++ b/packages/tracker-core/src/statistics/persisted/downloads.rs @@ -7,7 +7,7 @@ use torrust_tracker_primitives::{NumberOfDownloads, NumberOfDownloadsBTreeMap}; use crate::databases::error::Error; use crate::databases::Database; -/// Torrent repository implementation that persists torrent metrics in a 
database. +/// It persists torrent metrics in a database. /// /// This repository persists only a subset of the torrent data: the torrent /// metrics, specifically the number of downloads (or completed counts) for each diff --git a/src/container.rs b/src/container.rs index 461a5b36a..7112a54e8 100644 --- a/src/container.rs +++ b/src/container.rs @@ -29,7 +29,7 @@ pub struct AppContainer { // Registar pub registar: Arc, - // Torrent Repository + // Swarm Coordination Registry Container pub swarm_coordination_registry_container: Arc, // Core @@ -58,7 +58,7 @@ impl AppContainer { let registar = Arc::new(Registar::default()); - // Torrent Repository + // Swarm Coordination Registry Container let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), @@ -97,7 +97,7 @@ impl AppContainer { // Registar registar, - // Torrent Repository + // Swarm Coordination Registry Container swarm_coordination_registry_container, // Core From 7be03663946dcbcf2f1ee28d62b1c8d30741cd42 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jun 2025 13:42:39 +0100 Subject: [PATCH 149/247] feat: [#1534] add new metric to count peers reverting state from complete to any other state The metric is: ``` swarm_coordination_registry_peers_completed_state_reverted_total 1 ``` --- .../src/statistics/event/handler.rs | 27 +++++++++++++++---- .../src/statistics/mod.rs | 10 +++++++ 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/packages/swarm-coordination-registry/src/statistics/event/handler.rs b/packages/swarm-coordination-registry/src/statistics/event/handler.rs index 17b012086..1d3f8f32c 100644 --- a/packages/swarm-coordination-registry/src/statistics/event/handler.rs +++ b/packages/swarm-coordination-registry/src/statistics/event/handler.rs @@ -8,10 +8,11 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; use 
crate::statistics::{ - SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL, - SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL, SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL, - SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL, - SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL, + SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_PEERS_COMPLETED_STATE_REVERTED_TOTAL, + SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL, SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL, + SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL, }; #[allow(clippy::too_many_lines)] @@ -103,6 +104,8 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: } => { tracing::debug!(info_hash = ?info_hash, old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated", ); + // If the peer's role has changed, we need to adjust the number of + // connections if old_peer.role() != new_peer.role() { let _unused = stats_repository .increment_gauge( @@ -121,6 +124,20 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: .await; } + // If the peer reverted from a completed state to any other state, + // we need to increment the counter for reverted completed. + if old_peer.is_completed() && !new_peer.is_completed() { + let _unused = stats_repository + .increment_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_COMPLETED_STATE_REVERTED_TOTAL), + &LabelSet::default(), + now, + ) + .await; + } + + // Regardless of the role change, we still need to increment the + // counter for updated peers. 
let label_set = label_set_for_peer(&new_peer); let _unused = stats_repository @@ -134,7 +151,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: Event::PeerDownloadCompleted { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); - let _unused = stats_repository + let _unused: Result<(), torrust_tracker_metrics::metric_collection::Error> = stats_repository .increment_counter( &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL), &label_set_for_peer(&peer), diff --git a/packages/swarm-coordination-registry/src/statistics/mod.rs b/packages/swarm-coordination-registry/src/statistics/mod.rs index 5b9b7f376..a4bf4c018 100644 --- a/packages/swarm-coordination-registry/src/statistics/mod.rs +++ b/packages/swarm-coordination-registry/src/statistics/mod.rs @@ -26,6 +26,8 @@ const SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL: &str = "swarm_coordinatio const SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL: &str = "swarm_coordination_registry_peer_connections_total"; const SWARM_COORDINATION_REGISTRY_UNIQUE_PEERS_TOTAL: &str = "swarm_coordination_registry_unique_peers_total"; // todo: not implemented yet const SWARM_COORDINATION_REGISTRY_PEERS_INACTIVE_TOTAL: &str = "swarm_coordination_registry_peers_inactive_total"; +const SWARM_COORDINATION_REGISTRY_PEERS_COMPLETED_STATE_REVERTED_TOTAL: &str = + "swarm_coordination_registry_peers_completed_state_reverted_total"; #[must_use] pub fn describe_metrics() -> Metrics { @@ -103,5 +105,13 @@ pub fn describe_metrics() -> Metrics { Some(MetricDescription::new("The total number of inactive peers.")), ); + metrics.metric_collection.describe_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_COMPLETED_STATE_REVERTED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new( + "The total number of peers whose completed state was reverted.", + )), + ); + metrics } From d81e59e2e11787ea99fa123091154a524c85f8eb Mon Sep 17 00:00:00 2001 
From: Jose Celano Date: Mon, 9 Jun 2025 17:27:03 +0100 Subject: [PATCH 150/247] fix: [#1565] ban service should work with stats disabled --- .../src/banning/event/handler.rs | 19 ++++++ .../src/banning/event/listener.rs | 58 +++++++++++++++++++ .../src/banning/event/mod.rs | 2 + .../udp-tracker-server/src/banning/mod.rs | 1 + .../udp-tracker-server/src/environment.rs | 51 ++++++++-------- packages/udp-tracker-server/src/lib.rs | 1 + .../src/statistics/event/handler/error.rs | 15 ----- .../src/statistics/event/handler/mod.rs | 13 +---- .../event/handler/request_aborted.rs | 6 -- .../event/handler/request_accepted.rs | 14 ----- .../event/handler/request_banned.rs | 6 -- .../event/handler/request_received.rs | 4 -- .../statistics/event/handler/response_sent.rs | 6 -- .../src/statistics/event/listener.rs | 15 ++--- src/app.rs | 20 +++++-- src/bootstrap/jobs/udp_tracker_server.rs | 11 +++- 16 files changed, 137 insertions(+), 105 deletions(-) create mode 100644 packages/udp-tracker-server/src/banning/event/handler.rs create mode 100644 packages/udp-tracker-server/src/banning/event/listener.rs create mode 100644 packages/udp-tracker-server/src/banning/event/mod.rs create mode 100644 packages/udp-tracker-server/src/banning/mod.rs diff --git a/packages/udp-tracker-server/src/banning/event/handler.rs b/packages/udp-tracker-server/src/banning/event/handler.rs new file mode 100644 index 000000000..2d77d0979 --- /dev/null +++ b/packages/udp-tracker-server/src/banning/event/handler.rs @@ -0,0 +1,19 @@ +use std::sync::Arc; + +use bittorrent_udp_tracker_core::services::banning::BanService; +use tokio::sync::RwLock; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::{ErrorKind, Event}; + +pub async fn handle_event(event: Event, ban_service: &Arc>, _now: DurationSinceUnixEpoch) { + if let Event::UdpError { + context, + kind: _, + error: ErrorKind::ConnectionCookie(_msg), + } = event + { + let mut ban_service = ban_service.write().await; + 
ban_service.increase_counter(&context.client_socket_addr().ip()); + } +} diff --git a/packages/udp-tracker-server/src/banning/event/listener.rs b/packages/udp-tracker-server/src/banning/event/listener.rs new file mode 100644 index 000000000..ee1a4366f --- /dev/null +++ b/packages/udp-tracker-server/src/banning/event/listener.rs @@ -0,0 +1,58 @@ +use std::sync::Arc; + +use bittorrent_udp_tracker_core::services::banning::BanService; +use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; +use tokio::sync::RwLock; +use tokio::task::JoinHandle; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; + +use super::handler::handle_event; +use crate::event::receiver::Receiver; +use crate::CurrentClock; + +#[must_use] +pub fn run_event_listener(receiver: Receiver, ban_service: &Arc>) -> JoinHandle<()> { + let ban_service_clone = ban_service.clone(); + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener (banning)"); + + tokio::spawn(async move { + dispatch_events(receiver, ban_service_clone).await; + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener (banning) finished"); + }) +} + +async fn dispatch_events(mut receiver: Receiver, ban_service: Arc>) { + let shutdown_signal = tokio::signal::ctrl_c(); + tokio::pin!(shutdown_signal); + + loop { + tokio::select! 
{ + biased; + + _ = &mut shutdown_signal => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received Ctrl+C, shutting down UDP tracker server event listener (banning)"); + break; + } + + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, &ban_service, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp server receiver (banning) closed."); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp server receiver (banning) lagged by {} events.", n); + } + } + } + } + } + } + } +} diff --git a/packages/udp-tracker-server/src/banning/event/mod.rs b/packages/udp-tracker-server/src/banning/event/mod.rs new file mode 100644 index 000000000..dae683398 --- /dev/null +++ b/packages/udp-tracker-server/src/banning/event/mod.rs @@ -0,0 +1,2 @@ +pub mod handler; +pub mod listener; diff --git a/packages/udp-tracker-server/src/banning/mod.rs b/packages/udp-tracker-server/src/banning/mod.rs new file mode 100644 index 000000000..53f112654 --- /dev/null +++ b/packages/udp-tracker-server/src/banning/mod.rs @@ -0,0 +1 @@ +pub mod event; diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index f48b3a7c1..6c03cc75f 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -1,13 +1,11 @@ use std::net::SocketAddr; use std::sync::Arc; -use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use tokio::task::JoinHandle; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; -use torrust_tracker_primitives::peer; use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use 
crate::container::UdpTrackerServerContainer; @@ -25,22 +23,8 @@ where pub registar: Registar, pub server: Server, pub udp_core_event_listener_job: Option>, - pub udp_server_event_listener_job: Option>, -} - -impl Environment -where - S: std::fmt::Debug + std::fmt::Display, -{ - /// Add a torrent to the tracker - #[allow(dead_code)] - pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { - self.container - .tracker_core_container - .in_memory_torrent_repository - .handle_announcement(info_hash, peer, None) - .await; - } + pub udp_server_stats_event_listener_job: Option>, + pub udp_server_banning_event_listener_job: Option>, } impl Environment { @@ -60,7 +44,8 @@ impl Environment { registar: Registar::default(), server, udp_core_event_listener_job: None, - udp_server_event_listener_job: None, + udp_server_stats_event_listener_job: None, + udp_server_banning_event_listener_job: None, } } @@ -78,10 +63,15 @@ impl Environment { &self.container.udp_tracker_core_container.stats_repository, )); - // Start the UDP tracker server event listener - let udp_server_event_listener_job = Some(crate::statistics::event::listener::run_event_listener( + // Start the UDP tracker server event listener (statistics) + let udp_server_stats_event_listener_job = Some(crate::statistics::event::listener::run_event_listener( self.container.udp_tracker_server_container.event_bus.receiver(), &self.container.udp_tracker_server_container.stats_repository, + )); + + // Start the UDP tracker server event listener (banning) + let udp_server_banning_event_listener_job = Some(crate::banning::event::listener::run_event_listener( + self.container.udp_tracker_server_container.event_bus.receiver(), &self.container.udp_tracker_core_container.ban_service, )); @@ -102,7 +92,8 @@ impl Environment { registar: self.registar.clone(), server, udp_core_event_listener_job, - udp_server_event_listener_job, + udp_server_stats_event_listener_job, + udp_server_banning_event_listener_job, } } } @@ 
-131,11 +122,18 @@ impl Environment { udp_core_event_listener_job.abort(); } - // Stop the UDP tracker server event listener - if let Some(udp_server_event_listener_job) = self.udp_server_event_listener_job { + // Stop the UDP tracker server event listener (statistics) + if let Some(udp_server_stats_event_listener_job) = self.udp_server_stats_event_listener_job { + // todo: send a message to the event listener to stop and wait for + // it to finish + udp_server_stats_event_listener_job.abort(); + } + + // Stop the UDP tracker server event listener (banning) + if let Some(udp_server_banning_event_listener_job) = self.udp_server_banning_event_listener_job { // todo: send a message to the event listener to stop and wait for // it to finish - udp_server_event_listener_job.abort(); + udp_server_banning_event_listener_job.abort(); } // Stop the UDP tracker server @@ -149,7 +147,8 @@ impl Environment { registar: Registar::default(), server, udp_core_event_listener_job: None, - udp_server_event_listener_job: None, + udp_server_stats_event_listener_job: None, + udp_server_banning_event_listener_job: None, } } diff --git a/packages/udp-tracker-server/src/lib.rs b/packages/udp-tracker-server/src/lib.rs index 996c41917..58a3830e1 100644 --- a/packages/udp-tracker-server/src/lib.rs +++ b/packages/udp-tracker-server/src/lib.rs @@ -634,6 +634,7 @@ //! documentation by [Arvid Norberg](https://github.com/arvidn) was very //! supportive in the development of this documentation. Some descriptions were //! taken from the [libtorrent](https://www.rasterbar.com/products/libtorrent/udp_tracker_protocol.html). 
+pub mod banning; pub mod container; pub mod environment; pub mod error; diff --git a/packages/udp-tracker-server/src/statistics/event/handler/error.rs b/packages/udp-tracker-server/src/statistics/event/handler/error.rs index 7327386a3..7bde032fe 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/error.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/error.rs @@ -1,8 +1,4 @@ -use std::sync::Arc; - use aquatic_udp_protocol::PeerClient; -use bittorrent_udp_tracker_core::services::banning::BanService; -use tokio::sync::RwLock; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::{label_name, metric_name}; use torrust_tracker_primitives::DurationSinceUnixEpoch; @@ -16,16 +12,9 @@ pub async fn handle_event( opt_udp_request_kind: Option, error_kind: ErrorKind, repository: &Repository, - ban_service: &Arc>, now: DurationSinceUnixEpoch, ) { - if let ErrorKind::ConnectionCookie(_msg) = error_kind.clone() { - let mut ban_service = ban_service.write().await; - ban_service.increase_counter(&connection_context.client_socket_addr().ip()); - } - update_global_fixed_metrics(&connection_context, repository).await; - update_extendable_metrics(&connection_context, opt_udp_request_kind, error_kind, repository, now).await; } @@ -126,9 +115,7 @@ fn extract_name_and_version(peer_client: &PeerClient) -> (String, String) { #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - use bittorrent_udp_tracker_core::services::banning::BanService; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -141,7 +128,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_errors_counter_when_it_receives_a_udp4_error_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpError { @@ -157,7 +143,6 @@ mod tests { error: 
ErrorKind::RequestParse("Invalid request format".to_string()), }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/handler/mod.rs b/packages/udp-tracker-server/src/statistics/event/handler/mod.rs index c8ac864a3..9e7f5cd47 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/mod.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/mod.rs @@ -5,21 +5,12 @@ mod request_banned; mod request_received; mod response_sent; -use std::sync::Arc; - -use bittorrent_udp_tracker_core::services::banning::BanService; -use tokio::sync::RwLock; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; -pub async fn handle_event( - event: Event, - stats_repository: &Repository, - ban_service: &Arc>, - now: DurationSinceUnixEpoch, -) { +pub async fn handle_event(event: Event, stats_repository: &Repository, now: DurationSinceUnixEpoch) { match event { Event::UdpRequestAborted { context } => { request_aborted::handle_event(context, stats_repository, now).await; @@ -41,7 +32,7 @@ pub async fn handle_event( response_sent::handle_event(context, kind, req_processing_time, stats_repository, now).await; } Event::UdpError { context, kind, error } => { - error::handle_event(context, kind, error, stats_repository, ban_service, now).await; + error::handle_event(context, kind, error, stats_repository, now).await; } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs index 270ec2a45..fc701df75 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs @@ -27,9 +27,7 @@ pub async fn handle_event(context: ConnectionContext, stats_repository: &Reposit #[cfg(test)] mod tests { use std::net::{IpAddr, 
Ipv4Addr, SocketAddr}; - use std::sync::Arc; - use bittorrent_udp_tracker_core::services::banning::BanService; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -41,7 +39,6 @@ mod tests { #[tokio::test] async fn should_increase_the_number_of_aborted_requests_when_it_receives_a_udp_request_aborted_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAborted { @@ -55,7 +52,6 @@ mod tests { ), }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; @@ -68,7 +64,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAborted { @@ -82,7 +77,6 @@ mod tests { ), }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs index 0007a18b0..b296f8ec9 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs @@ -55,9 +55,7 @@ pub async fn handle_event( #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::sync::Arc; - use bittorrent_udp_tracker_core::services::banning::BanService; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -70,7 +68,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_connect_requests_counter_when_it_receives_a_udp4_request_event_of_connect_kind() { let stats_repository = Repository::new(); - let ban_service = 
Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -85,7 +82,6 @@ mod tests { kind: crate::event::UdpRequestKind::Connect, }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; @@ -98,7 +94,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_announce_requests_counter_when_it_receives_a_udp4_request_event_of_announce_kind() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -115,7 +110,6 @@ mod tests { }, }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; @@ -128,7 +122,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_scrape_requests_counter_when_it_receives_a_udp4_request_event_of_scrape_kind() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -143,7 +136,6 @@ mod tests { kind: crate::event::UdpRequestKind::Scrape, }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; @@ -156,7 +148,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_connect_requests_counter_when_it_receives_a_udp6_request_event_of_connect_kind() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -171,7 +162,6 @@ mod tests { kind: crate::event::UdpRequestKind::Connect, }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; @@ -184,7 +174,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_announce_requests_counter_when_it_receives_a_udp6_request_event_of_announce_kind() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -201,7 +190,6 @@ mod tests { }, }, &stats_repository, - 
&ban_service, CurrentClock::now(), ) .await; @@ -214,7 +202,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_scrape_requests_counter_when_it_receives_a_udp6_request_event_of_scrape_kind() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -229,7 +216,6 @@ mod tests { kind: crate::event::UdpRequestKind::Scrape, }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs index 74641574a..ce6e179a3 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs @@ -27,9 +27,7 @@ pub async fn handle_event(context: ConnectionContext, stats_repository: &Reposit #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - use bittorrent_udp_tracker_core::services::banning::BanService; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -41,7 +39,6 @@ mod tests { #[tokio::test] async fn should_increase_the_number_of_banned_requests_when_it_receives_a_udp_request_banned_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestBanned { @@ -55,7 +52,6 @@ mod tests { ), }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; @@ -68,7 +64,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestBanned { @@ -82,7 +77,6 @@ mod tests { ), }, &stats_repository, 
- &ban_service, CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs index 8333258c2..89f306f6a 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs @@ -34,9 +34,7 @@ pub async fn handle_event(context: ConnectionContext, stats_repository: &Reposit #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - use bittorrent_udp_tracker_core::services::banning::BanService; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -48,7 +46,6 @@ mod tests { #[tokio::test] async fn should_increase_the_number_of_incoming_requests_when_it_receives_a_udp4_incoming_request_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestReceived { @@ -62,7 +59,6 @@ mod tests { ), }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index 0038ac5f9..4e167a10e 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -107,9 +107,7 @@ pub async fn handle_event( #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::sync::Arc; - use bittorrent_udp_tracker_core::services::banning::BanService; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -122,7 +120,6 @@ mod tests { #[tokio::test] async fn 
should_increase_the_udp4_responses_counter_when_it_receives_a_udp4_response_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpResponseSent { @@ -142,7 +139,6 @@ mod tests { req_processing_time: std::time::Duration::from_secs(1), }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; @@ -155,7 +151,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_response_counter_when_it_receives_a_udp6_response_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpResponseSent { @@ -175,7 +170,6 @@ mod tests { req_processing_time: std::time::Duration::from_secs(1), }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index e6c9a85ce..ae659c15e 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -1,8 +1,6 @@ use std::sync::Arc; -use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; -use tokio::sync::RwLock; use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; @@ -13,24 +11,19 @@ use crate::statistics::repository::Repository; use crate::CurrentClock; #[must_use] -pub fn run_event_listener( - receiver: Receiver, - repository: &Arc, - ban_service: &Arc>, -) -> JoinHandle<()> { +pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { let repository_clone = repository.clone(); - let ban_service_clone = ban_service.clone(); tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener"); tokio::spawn(async move { - 
dispatch_events(receiver, repository_clone, ban_service_clone).await; + dispatch_events(receiver, repository_clone).await; tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc, ban_service: Arc>) { +async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { let shutdown_signal = tokio::signal::ctrl_c(); tokio::pin!(shutdown_signal); @@ -45,7 +38,7 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { match result { - Ok(event) => handle_event(event, &stats_repository, &ban_service, CurrentClock::now()).await, + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, Err(e) => { match e { RecvError::Closed => { diff --git a/src/app.rs b/src/app.rs index 5050c1dd1..58d758d7f 100644 --- a/src/app.rs +++ b/src/app.rs @@ -75,7 +75,8 @@ async fn start_jobs(config: &Configuration, app_container: &Arc) - start_tracker_core_event_listener(config, app_container, &mut job_manager); start_http_core_event_listener(config, app_container, &mut job_manager); start_udp_core_event_listener(config, app_container, &mut job_manager); - start_udp_server_event_listener(config, app_container, &mut job_manager); + start_udp_server_stats_event_listener(config, app_container, &mut job_manager); + start_udp_server_banning_event_listener(app_container, &mut job_manager); start_the_udp_instances(config, app_container, &mut job_manager).await; start_the_http_instances(config, app_container, &mut job_manager).await; @@ -164,10 +165,21 @@ fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { +fn start_udp_server_stats_event_listener( + config: &Configuration, + app_container: &Arc, + job_manager: &mut JobManager, +) { job_manager.push_opt( - "udp_server_event_listener", - jobs::udp_tracker_server::start_event_listener(config, app_container), + 
"udp_server_stats_event_listener", + jobs::udp_tracker_server::start_stats_event_listener(config, app_container), + ); +} + +fn start_udp_server_banning_event_listener(app_container: &Arc, job_manager: &mut JobManager) { + job_manager.push( + "udp_server_banning_event_listener", + jobs::udp_tracker_server::start_banning_event_listener(app_container), ); } diff --git a/src/bootstrap/jobs/udp_tracker_server.rs b/src/bootstrap/jobs/udp_tracker_server.rs index 8a4c2a273..0910fdaf5 100644 --- a/src/bootstrap/jobs/udp_tracker_server.rs +++ b/src/bootstrap/jobs/udp_tracker_server.rs @@ -5,12 +5,11 @@ use torrust_tracker_configuration::Configuration; use crate::container::AppContainer; -pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { +pub fn start_stats_event_listener(config: &Configuration, app_container: &Arc) -> Option> { if config.core.tracker_usage_statistics { let job = torrust_udp_tracker_server::statistics::event::listener::run_event_listener( app_container.udp_tracker_server_container.event_bus.receiver(), &app_container.udp_tracker_server_container.stats_repository, - &app_container.udp_tracker_core_services.ban_service, ); Some(job) } else { @@ -18,3 +17,11 @@ pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> JoinHandle<()> { + torrust_udp_tracker_server::banning::event::listener::run_event_listener( + app_container.udp_tracker_server_container.event_bus.receiver(), + &app_container.udp_tracker_core_services.ban_service, + ) +} From f7b80ed937fd98e6c31f47e87fd005990bbf25a4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Jun 2025 13:50:26 +0100 Subject: [PATCH 151/247] feat: [#1570] add new metric for banned IPs total --- .../src/banning/event/handler.rs | 30 ++++++++++++++++++- .../src/banning/event/listener.rs | 14 ++++++--- .../udp-tracker-server/src/environment.rs | 1 + .../udp-tracker-server/src/statistics/mod.rs | 7 +++++ src/bootstrap/jobs/udp_tracker_server.rs | 1 + 5 files changed, 
48 insertions(+), 5 deletions(-) diff --git a/packages/udp-tracker-server/src/banning/event/handler.rs b/packages/udp-tracker-server/src/banning/event/handler.rs index 2d77d0979..4876323a8 100644 --- a/packages/udp-tracker-server/src/banning/event/handler.rs +++ b/packages/udp-tracker-server/src/banning/event/handler.rs @@ -2,11 +2,20 @@ use std::sync::Arc; use bittorrent_udp_tracker_core::services::banning::BanService; use tokio::sync::RwLock; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::{ErrorKind, Event}; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_IPS_BANNED_TOTAL; -pub async fn handle_event(event: Event, ban_service: &Arc>, _now: DurationSinceUnixEpoch) { +pub async fn handle_event( + event: Event, + ban_service: &Arc>, + repository: &Repository, + now: DurationSinceUnixEpoch, +) { if let Event::UdpError { context, kind: _, @@ -14,6 +23,25 @@ pub async fn handle_event(event: Event, ban_service: &Arc>, _ } = event { let mut ban_service = ban_service.write().await; + ban_service.increase_counter(&context.client_socket_addr().ip()); + + update_metric_for_banned_ips_total(repository, ban_service.get_banned_ips_total(), now).await; + } +} + +#[allow(clippy::cast_precision_loss)] +async fn update_metric_for_banned_ips_total(repository: &Repository, ips_banned_total: usize, now: DurationSinceUnixEpoch) { + match repository + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), + &LabelSet::default(), + ips_banned_total as f64, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), } } diff --git a/packages/udp-tracker-server/src/banning/event/listener.rs b/packages/udp-tracker-server/src/banning/event/listener.rs index ee1a4366f..fee3395fa 100644 --- a/packages/udp-tracker-server/src/banning/event/listener.rs +++ 
b/packages/udp-tracker-server/src/banning/event/listener.rs @@ -9,22 +9,28 @@ use torrust_tracker_events::receiver::RecvError; use super::handler::handle_event; use crate::event::receiver::Receiver; +use crate::statistics::repository::Repository; use crate::CurrentClock; #[must_use] -pub fn run_event_listener(receiver: Receiver, ban_service: &Arc>) -> JoinHandle<()> { +pub fn run_event_listener( + receiver: Receiver, + ban_service: &Arc>, + repository: &Arc, +) -> JoinHandle<()> { let ban_service_clone = ban_service.clone(); + let repository_clone = repository.clone(); tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener (banning)"); tokio::spawn(async move { - dispatch_events(receiver, ban_service_clone).await; + dispatch_events(receiver, ban_service_clone, repository_clone).await; tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener (banning) finished"); }) } -async fn dispatch_events(mut receiver: Receiver, ban_service: Arc>) { +async fn dispatch_events(mut receiver: Receiver, ban_service: Arc>, repository: Arc) { let shutdown_signal = tokio::signal::ctrl_c(); tokio::pin!(shutdown_signal); @@ -39,7 +45,7 @@ async fn dispatch_events(mut receiver: Receiver, ban_service: Arc { match result { - Ok(event) => handle_event(event, &ban_service, CurrentClock::now()).await, + Ok(event) => handle_event(event, &ban_service, &repository, CurrentClock::now()).await, Err(e) => { match e { RecvError::Closed => { diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 6c03cc75f..61b1cba63 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -73,6 +73,7 @@ impl Environment { let udp_server_banning_event_listener_job = Some(crate::banning::event::listener::run_event_listener( self.container.udp_tracker_server_container.event_bus.receiver(), &self.container.udp_tracker_core_container.ban_service, 
+ &self.container.udp_tracker_server_container.stats_repository, )); // Start the UDP tracker server diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index a7da2dc63..ebb3df0bf 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -10,6 +10,7 @@ use torrust_tracker_metrics::unit::Unit; const UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL: &str = "udp_tracker_server_requests_aborted_total"; const UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL: &str = "udp_tracker_server_requests_banned_total"; +pub(crate) const UDP_TRACKER_SERVER_IPS_BANNED_TOTAL: &str = "udp_tracker_server_ips_banned_total"; const UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL: &str = "udp_tracker_server_connection_id_errors_total"; const UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL: &str = "udp_tracker_server_requests_received_total"; const UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL: &str = "udp_tracker_server_requests_accepted_total"; @@ -33,6 +34,12 @@ pub fn describe_metrics() -> Metrics { Some(MetricDescription::new("Total number of UDP requests banned")), ); + metrics.metric_collection.describe_gauge( + &metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("Total number of IPs banned from UDP requests")), + ); + metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL), Some(Unit::Count), diff --git a/src/bootstrap/jobs/udp_tracker_server.rs b/src/bootstrap/jobs/udp_tracker_server.rs index 0910fdaf5..3e8a7aaa8 100644 --- a/src/bootstrap/jobs/udp_tracker_server.rs +++ b/src/bootstrap/jobs/udp_tracker_server.rs @@ -23,5 +23,6 @@ pub fn start_banning_event_listener(app_container: &Arc) -> JoinHa torrust_udp_tracker_server::banning::event::listener::run_event_listener( app_container.udp_tracker_server_container.event_bus.receiver(), 
&app_container.udp_tracker_core_services.ban_service, + &app_container.udp_tracker_server_container.stats_repository, ) } From 12d69179a8c8a2d240d7860ffe2e84afc4082d62 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Jun 2025 15:42:55 +0100 Subject: [PATCH 152/247] feat: [#1571] increase broadcaster channel capacity to 65536 --- packages/events/src/broadcaster.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/events/src/broadcaster.rs b/packages/events/src/broadcaster.rs index d0a511cd4..79c83df8a 100644 --- a/packages/events/src/broadcaster.rs +++ b/packages/events/src/broadcaster.rs @@ -5,7 +5,7 @@ use tokio::sync::broadcast::{self}; use crate::receiver::{Receiver, RecvError}; use crate::sender::{SendError, Sender}; -const CHANNEL_CAPACITY: usize = 32768; +const CHANNEL_CAPACITY: usize = 65536; /// An event sender and receiver implementation using a broadcast channel. #[derive(Clone, Debug)] From 02433cbe809d72e066b1bd3c3461e4349a201c67 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Jun 2025 18:56:26 +0100 Subject: [PATCH 153/247] fix: [#1569] Prometheus txt export format. 
Only one HELP and TYPE header per metric Current format: ``` # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (BC)",client_software_version="0087"} 4 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (FD66)",client_software_version=""} 1 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (SP)",client_software_version="3605"} 631 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (TIX0325)",client_software_version=""} 14 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (BC)",client_software_version="0202"} 6754 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (XF)",client_software_version="9400"} 1 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other 
(BC)",client_software_version="0090"} 7 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Transmission",client_software_version="2.32"} 1 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (61-b39e)",client_software_version=""} 1 ``` Expected format: ``` # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (BC)",client_software_version="0087"} 4 udp_tracker_server_connection_id_errors_total{client_software_name="Other (FD66)",client_software_version=""} 1 udp_tracker_server_connection_id_errors_total{client_software_name="Other (SP)",client_software_version="3605"} 631 udp_tracker_server_connection_id_errors_total{client_software_name="Other (TIX0325)",client_software_version=""} 14 udp_tracker_server_connection_id_errors_total{client_software_name="Other (BC)",client_software_version="0202"} 6754 udp_tracker_server_connection_id_errors_total{client_software_name="Other (XF)",client_software_version="9400"} 1 udp_tracker_server_connection_id_errors_total{client_software_name="Other (BC)",client_software_version="0090"} 7 udp_tracker_server_connection_id_errors_total{client_software_name="Transmission",client_software_version="2.32"} 1 udp_tracker_server_connection_id_errors_total{client_software_name="Other (61-b39e)",client_software_version=""} 1 ``` A line break after each metric has also been added to improve readability. 
--- packages/metrics/src/label/set.rs | 8 ++ packages/metrics/src/lib.rs | 6 +- packages/metrics/src/metric/mod.rs | 102 ++++++---------------- packages/metrics/src/metric_collection.rs | 31 ++++--- packages/metrics/src/sample.rs | 6 +- packages/metrics/src/sample_collection.rs | 6 +- 6 files changed, 64 insertions(+), 95 deletions(-) diff --git a/packages/metrics/src/label/set.rs b/packages/metrics/src/label/set.rs index 1c2c3e27e..cab457f42 100644 --- a/packages/metrics/src/label/set.rs +++ b/packages/metrics/src/label/set.rs @@ -16,6 +16,10 @@ impl LabelSet { pub fn upsert(&mut self, key: LabelName, value: LabelValue) { self.items.insert(key, value); } + + pub fn is_empty(&self) -> bool { + self.items.is_empty() + } } impl Display for LabelSet { @@ -157,6 +161,10 @@ impl<'de> Deserialize<'de> for LabelSet { impl PrometheusSerializable for LabelSet { fn to_prometheus(&self) -> String { + if self.is_empty() { + return String::new(); + } + let items = self.items.iter().fold(String::new(), |mut output, label_pair| { if !output.is_empty() { output.push(','); diff --git a/packages/metrics/src/lib.rs b/packages/metrics/src/lib.rs index 95d70bf6c..997cd3c8c 100644 --- a/packages/metrics/src/lib.rs +++ b/packages/metrics/src/lib.rs @@ -12,12 +12,12 @@ pub const METRICS_TARGET: &str = "METRICS"; #[cfg(test)] mod tests { - /// It removes leading and trailing whitespace from each line, and empty lines. + /// It removes leading and trailing whitespace from each line. 
pub fn format_prometheus_output(output: &str) -> String { output .lines() - .map(str::trim) - .filter(|line| !line.is_empty()) + .map(str::trim_start) + .map(str::trim_end) .collect::>() .join("\n") } diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 6f254023f..df743c519 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -103,19 +103,6 @@ impl Metric { } } -/// `PrometheusMetricSample` is a wrapper around types that provides methods to -/// convert the metric and its measurement into a Prometheus-compatible format. -/// -/// In Prometheus, a metric is a time series that consists of a name, a set of -/// labels, and a value. The sample value needs data from the `Metric` and -/// `Measurement` structs, as well as the `LabelSet` that defines the labels for -/// the metric. -struct PrometheusMetricSample<'a, T> { - metric: &'a Metric, - measurement: &'a Measurement, - label_set: &'a LabelSet, -} - enum PrometheusType { Counter, Gauge, @@ -130,91 +117,58 @@ impl PrometheusSerializable for PrometheusType { } } -impl PrometheusMetricSample<'_, T> { - fn to_prometheus(&self, prometheus_type: &PrometheusType) -> String { - format!( - // Format: - // # HELP - // # TYPE - // {label_set} - "{}{}{}", - self.help_line(), - self.type_line(prometheus_type), - self.metric_line() - ) - } - - fn help_line(&self) -> String { - if let Some(description) = &self.metric.opt_description { - format!( - // Format: # HELP - "# HELP {} {}\n", - self.metric.name().to_prometheus(), - description.to_prometheus() - ) +impl Metric { + #[must_use] + fn prometheus_help_line(&self) -> String { + if let Some(description) = &self.opt_description { + format!("# HELP {} {}", self.name.to_prometheus(), description.to_prometheus()) } else { String::new() } } - fn type_line(&self, kind: &PrometheusType) -> String { - format!("# TYPE {} {}\n", self.metric.name().to_prometheus(), kind.to_prometheus()) + #[must_use] + fn 
prometheus_type_line(&self, prometheus_type: &PrometheusType) -> String { + format!("# TYPE {} {}", self.name.to_prometheus(), prometheus_type.to_prometheus()) } - fn metric_line(&self) -> String { + #[must_use] + fn prometheus_sample_line(&self, label_set: &LabelSet, measurement: &Measurement) -> String { format!( - // Format: {label_set} "{}{} {}", - self.metric.name.to_prometheus(), - self.label_set.to_prometheus(), - self.measurement.value().to_prometheus() + self.name.to_prometheus(), + label_set.to_prometheus(), + measurement.to_prometheus() ) } -} -impl<'a> PrometheusMetricSample<'a, Counter> { - pub fn new(metric: &'a Metric, measurement: &'a Measurement, label_set: &'a LabelSet) -> Self { - Self { - metric, - measurement, - label_set, - } + #[must_use] + fn prometheus_samples(&self) -> String { + self.sample_collection + .iter() + .map(|(label_set, measurement)| self.prometheus_sample_line(label_set, measurement)) + .collect::>() + .join("\n") } -} -impl<'a> PrometheusMetricSample<'a, Gauge> { - pub fn new(metric: &'a Metric, measurement: &'a Measurement, label_set: &'a LabelSet) -> Self { - Self { - metric, - measurement, - label_set, - } + fn to_prometheus(&self, prometheus_type: &PrometheusType) -> String { + let help_line = self.prometheus_help_line(); + let type_line = self.prometheus_type_line(prometheus_type); + let samples = self.prometheus_samples(); + + format!("{help_line}\n{type_line}\n{samples}") } } impl PrometheusSerializable for Metric { fn to_prometheus(&self) -> String { - let samples: Vec = self - .sample_collection - .iter() - .map(|(label_set, measurement)| { - PrometheusMetricSample::::new(self, measurement, label_set).to_prometheus(&PrometheusType::Counter) - }) - .collect(); - samples.join("\n") + self.to_prometheus(&PrometheusType::Counter) } } impl PrometheusSerializable for Metric { fn to_prometheus(&self) -> String { - let samples: Vec = self - .sample_collection - .iter() - .map(|(label_set, measurement)| { - 
PrometheusMetricSample::::new(self, measurement, label_set).to_prometheus(&PrometheusType::Gauge) - }) - .collect(); - samples.join("\n") + self.to_prometheus(&PrometheusType::Gauge) } } diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index c53d02bcf..ff932caae 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -322,7 +322,7 @@ impl PrometheusSerializable for MetricCollection { .map(Metric::::to_prometheus), ) .collect::>() - .join("\n") + .join("\n\n") } } @@ -629,14 +629,14 @@ mod tests { fn prometheus() -> String { format_prometheus_output( - r#" - # HELP http_tracker_core_announce_requests_received_total The number of announce requests received. - # TYPE http_tracker_core_announce_requests_received_total counter - http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 - # HELP udp_tracker_server_performance_avg_announce_processing_time_ns The average announce processing time in nanoseconds. - # TYPE udp_tracker_server_performance_avg_announce_processing_time_ns gauge - udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 - "#, + r#"# HELP http_tracker_core_announce_requests_received_total The number of announce requests received. +# TYPE http_tracker_core_announce_requests_received_total counter +http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 + +# HELP udp_tracker_server_performance_avg_announce_processing_time_ns The average announce processing time in nanoseconds. 
+# TYPE udp_tracker_server_performance_avg_announce_processing_time_ns gauge +udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 +"#, ) } } @@ -750,7 +750,7 @@ mod tests { MetricKindCollection::new(vec![Metric::new( metric_name!("http_tracker_core_announce_requests_received_total"), None, - None, + Some(MetricDescription::new("The number of announce requests received.")), SampleCollection::new(vec![ Sample::new(Counter::new(1), time, label_set_1.clone()), Sample::new(Counter::new(2), time, label_set_2.clone()), @@ -765,12 +765,11 @@ mod tests { let prometheus_output = metric_collection.to_prometheus(); let expected_prometheus_output = format_prometheus_output( - r#" - # TYPE http_tracker_core_announce_requests_received_total counter - http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7171",server_binding_protocol="http"} 2 - # TYPE http_tracker_core_announce_requests_received_total counter - http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 - "#, + r#"# HELP http_tracker_core_announce_requests_received_total The number of announce requests received. +# TYPE http_tracker_core_announce_requests_received_total counter +http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 +http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7171",server_binding_protocol="http"} 2 +"#, ); // code-review: samples are not serialized in the same order as they are created. 
diff --git a/packages/metrics/src/sample.rs b/packages/metrics/src/sample.rs index ad4dff00e..b9cd6c312 100644 --- a/packages/metrics/src/sample.rs +++ b/packages/metrics/src/sample.rs @@ -50,7 +50,11 @@ impl Sample { impl PrometheusSerializable for Sample { fn to_prometheus(&self) -> String { - format!("{} {}", self.label_set.to_prometheus(), self.measurement.to_prometheus()) + if self.label_set.is_empty() { + format!(" {}", self.measurement.to_prometheus()) + } else { + format!("{} {}", self.label_set.to_prometheus(), self.measurement.to_prometheus()) + } } } diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs index a87aacb63..ef88b27dd 100644 --- a/packages/metrics/src/sample_collection.rs +++ b/packages/metrics/src/sample_collection.rs @@ -155,7 +155,11 @@ impl PrometheusSerializable for SampleCollection { let mut output = String::new(); for (label_set, sample_data) in &self.samples { - let _ = write!(output, "{} {}", label_set.to_prometheus(), sample_data.to_prometheus()); + if label_set.is_empty() { + let _ = write!(output, "{}", sample_data.to_prometheus()); + } else { + let _ = write!(output, "{} {}", label_set.to_prometheus(), sample_data.to_prometheus()); + } } output From 9b254ce7082899a6995760f7403fb6d7efbad324 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Jun 2025 21:40:12 +0100 Subject: [PATCH 154/247] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 41 packages to latest compatible versions Updating adler2 v2.0.0 -> v2.0.1 Updating anstream v0.6.18 -> v0.6.19 Updating anstyle v1.0.10 -> v1.0.11 Updating anstyle-parse v0.2.6 -> v0.2.7 Updating anstyle-query v1.1.2 -> v1.1.3 Updating anstyle-wincon v3.0.8 -> v3.0.9 Updating async-compression v0.4.23 -> v0.4.24 Updating bindgen v0.71.1 -> v0.72.0 Updating bumpalo v3.17.0 -> v3.18.1 Updating bytemuck v1.23.0 -> v1.23.1 Updating camino v1.1.9 -> v1.1.10 Updating cc v1.2.25 -> v1.2.26 Updating cfg-if v1.0.0 -> 
v1.0.1 Updating clap v4.5.39 -> v4.5.40 Updating clap_builder v4.5.39 -> v4.5.40 Updating clap_derive v4.5.32 -> v4.5.40 Updating clap_lex v0.7.4 -> v0.7.5 Updating colorchoice v1.0.3 -> v1.0.4 Updating flate2 v1.1.1 -> v1.1.2 Updating fs-err v3.1.0 -> v3.1.1 Updating hashbrown v0.15.3 -> v0.15.4 Updating hyper-rustls v0.27.6 -> v0.27.7 Updating hyper-util v0.1.13 -> v0.1.14 Updating miniz_oxide v0.8.8 -> v0.8.9 Updating portable-atomic v1.11.0 -> v1.11.1 Updating reqwest v0.12.18 -> v0.12.20 Updating rustc-demangle v0.1.24 -> v0.1.25 Updating serde_spanned v0.6.8 -> v0.6.9 Updating smallvec v1.15.0 -> v1.15.1 Updating syn v2.0.101 -> v2.0.102 Updating toml v0.8.22 -> v0.8.23 Updating toml_datetime v0.6.9 -> v0.6.11 Updating toml_edit v0.22.26 -> v0.22.27 Updating toml_write v0.1.1 -> v0.1.2 Updating tower-http v0.6.5 -> v0.6.6 Updating tracing-attributes v0.1.28 -> v0.1.29 Updating tracing-core v0.1.33 -> v0.1.34 Updating unicode-width v0.2.0 -> v0.2.1 Updating wasi v0.11.0+wasi-snapshot-preview1 -> v0.11.1+wasi-snapshot-preview1 Updating windows-registry v0.4.0 -> v0.5.2 Removing windows-strings v0.3.1 Updating winnow v0.7.10 -> v0.7.11 ``` --- Cargo.lock | 277 +++++++++++++++++++++++++---------------------------- 1 file changed, 133 insertions(+), 144 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index feb749d3f..269f7a3a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "adler2" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "ahash" @@ -81,9 +81,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.18" +version = "0.6.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" dependencies = [ "anstyle", "anstyle-parse", @@ -96,33 +96,33 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" [[package]] name = "anstyle-parse" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.8" +version = "3.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6680de5231bd6ee4c6191b8a1325daa282b415391ec9d3a37bd34f2060dc73fa" +checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" dependencies = [ "anstyle", "once_cell_polyfill", @@ -217,9 +217,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b37fc50485c4f3f736a4fb14199f6d5f5ba008d7f28fe710306c92780f004c07" +checksum = "d615619615a650c571269c00dca41db04b9210037fa76ed8239f70404ab56985" dependencies = [ "brotli", "flate2", @@ -332,7 +332,7 @@ checksum = 
"e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -455,7 +455,7 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -537,9 +537,9 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version = "0.71.1" +version = "0.72.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" +checksum = "4f72209734318d0b619a5e0f5129918b848c416e122a3c4ce054e03cb87b726f" dependencies = [ "bitflags 2.9.1", "cexpr", @@ -550,7 +550,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -848,7 +848,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -889,9 +889,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "793db76d6187cd04dff33004d8e6c9cc4e05cd330500379d2394209271b4aeee" [[package]] name = "bytecheck" @@ -917,9 +917,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.23.0" +version = "1.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9134a6ef01ce4b366b50689c94f82c14bc72bc5d0386829828a2e2752ef7958c" +checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" [[package]] name = "byteorder" @@ -935,9 +935,9 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "camino" -version = "1.1.9" +version = "1.1.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" +checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" dependencies = [ "serde", ] @@ -959,9 +959,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.25" +version = "1.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0fc897dc1e865cc67c0e05a836d9d3f1df3cbe442aa4a9473b18e12624a4951" +checksum = "956a5e21988b87f372569b66183b78babf23ebc2e744b733e4350a752c4dafac" dependencies = [ "jobserver", "libc", @@ -979,9 +979,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "cfg_aliases" @@ -1052,9 +1052,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.39" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd60e63e9be68e5fb56422e397cf9baddded06dae1d2e523401542383bc72a9f" +checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f" dependencies = [ "clap_builder", "clap_derive", @@ -1062,9 +1062,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.39" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89cc6392a1f72bbeb820d71f32108f61fdaf18bc526e1d23954168a67759ef51" +checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" dependencies = [ "anstream", "anstyle", @@ -1074,21 +1074,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.32" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" +checksum = 
"d2c7947ae4cc3d851207c1adb5b5e260ff0cca11446b1d6d1423788e442257ce" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] name = "clap_lex" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" [[package]] name = "cmake" @@ -1101,9 +1101,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "compact_str" @@ -1336,7 +1336,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -1347,7 +1347,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -1391,7 +1391,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", "unicode-xid", ] @@ -1403,7 +1403,7 @@ checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -1430,7 +1430,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -1577,9 +1577,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +checksum = 
"4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", "libz-sys", @@ -1677,7 +1677,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -1689,7 +1689,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -1701,14 +1701,14 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] name = "fs-err" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f89bda4c2a21204059a977ed3bfe746677dfd137b83c339e702b0ac91d482aa" +checksum = "88d7be93788013f265201256d58f04936a8079ad5dc898743aa20525f503b683" dependencies = [ "autocfg", "tokio", @@ -1789,7 +1789,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -1846,7 +1846,7 @@ checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", ] [[package]] @@ -1931,9 +1931,9 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hashbrown" -version = "0.15.3" +version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" dependencies = [ "allocator-api2", "equivalent", @@ -1946,7 +1946,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.4", ] 
[[package]] @@ -2066,9 +2066,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.6" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a01595e11bdcec50946522c32dde3fc6914743000a68b93000965f2f02406d" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ "http", "hyper", @@ -2098,9 +2098,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c293b6b3d21eca78250dc7dbebd6b9210ec5530e038cbfe0661b5c47ab06e8" +checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb" dependencies = [ "base64 0.22.1", "bytes", @@ -2292,7 +2292,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", - "hashbrown 0.15.3", + "hashbrown 0.15.4", "serde", ] @@ -2522,7 +2522,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.4", ] [[package]] @@ -2564,7 +2564,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -2581,9 +2581,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", ] @@ -2595,7 +2595,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "windows-sys 0.59.0", ] @@ -2622,7 +2622,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -2672,7 +2672,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", "termcolor", "thiserror 1.0.69", ] @@ -2877,7 +2877,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -2961,7 +2961,7 @@ dependencies = [ "regex", "regex-syntax", "structmeta", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -2984,7 +2984,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -3115,9 +3115,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "portable-atomic-util" @@ -3216,7 +3216,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -3236,7 +3236,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", "version_check", "yansi", ] @@ -3468,9 +3468,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.18" +version = "0.12.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e98ff6b0dbbe4d5a37318f433d4fc82babd21631f194d370409ceb2e40b2f0b5" +checksum = "eabf4c97d9130e2bf606614eb937e86edac8292eaa6f422f995d7e8de1eb1813" dependencies = [ 
"base64 0.22.1", "bytes", @@ -3484,12 +3484,10 @@ dependencies = [ "hyper-rustls", "hyper-tls", "hyper-util", - "ipnet", "js-sys", "log", "mime", "native-tls", - "once_cell", "percent-encoding", "pin-project-lite", "rustls-pki-types", @@ -3588,7 +3586,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.101", + "syn 2.0.102", "unicode-ident", ] @@ -3624,9 +3622,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" [[package]] name = "rustc-hash" @@ -3846,7 +3844,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -3893,14 +3891,14 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] name = "serde_spanned" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" dependencies = [ "serde", ] @@ -3944,7 +3942,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -4016,9 +4014,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.15.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "socket2" @@ -4057,7 +4055,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", 
- "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -4068,7 +4066,7 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -4121,9 +4119,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.101" +version = "2.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +checksum = "f6397daf94fa90f058bd0fd88429dd9e5738999cca8d701813c80723add80462" dependencies = [ "proc-macro2", "quote", @@ -4147,7 +4145,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -4268,7 +4266,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" dependencies = [ "unicode-linebreak", - "unicode-width 0.2.0", + "unicode-width 0.2.1", ] [[package]] @@ -4297,7 +4295,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -4308,7 +4306,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -4412,7 +4410,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -4476,9 +4474,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.22" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", 
"serde_spanned", @@ -4488,18 +4486,18 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.26" +version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ "indexmap 2.9.0", "serde", @@ -4511,9 +4509,9 @@ dependencies = [ [[package]] name = "toml_write" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "torrust-axum-health-check-api-server" @@ -4943,9 +4941,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc2d9e086a412a451384326f521c8123a99a466b329941a9403696bff9b0da2" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ "async-compression", "bitflags 2.9.1", @@ -4991,20 +4989,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "1b1ffbcf9c6f6b99d386e7444eb608ba646ae452a36b39737deb9663b610f662" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.34" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -5100,9 +5098,9 @@ checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-width" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" +checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" [[package]] name = "unicode-xid" @@ -5197,9 +5195,9 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" @@ -5232,7 +5230,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", "wasm-bindgen-shared", ] @@ -5267,7 +5265,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5332,7 +5330,7 @@ dependencies = [ "windows-interface", "windows-link", "windows-result", - "windows-strings 0.4.2", + "windows-strings", ] [[package]] @@ -5343,7 +5341,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -5354,7 +5352,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -5365,13 
+5363,13 @@ checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" [[package]] name = "windows-registry" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" +checksum = "b3bab093bdd303a1240bb99b8aba8ea8a69ee19d34c9e2ef9594e708a4878820" dependencies = [ + "windows-link", "windows-result", - "windows-strings 0.3.1", - "windows-targets 0.53.0", + "windows-strings", ] [[package]] @@ -5383,15 +5381,6 @@ dependencies = [ "windows-link", ] -[[package]] -name = "windows-strings" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" -dependencies = [ - "windows-link", -] - [[package]] name = "windows-strings" version = "0.4.2" @@ -5549,9 +5538,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" +checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" dependencies = [ "memchr", ] @@ -5616,7 +5605,7 @@ checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", "synstructure", ] @@ -5647,7 +5636,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -5658,7 +5647,7 @@ checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -5678,7 +5667,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", "synstructure", ] @@ -5718,7 +5707,7 @@ checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] From 7e722c06f17603c9d049692f49eca4e1693b7cf7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Jun 2025 21:57:08 +0100 Subject: [PATCH 155/247] fix: clippy errors --- packages/axum-http-tracker-server/src/server.rs | 6 +++--- packages/axum-http-tracker-server/src/v1/routes.rs | 2 +- packages/axum-rest-tracker-api-server/src/routes.rs | 4 ++-- packages/axum-rest-tracker-api-server/src/server.rs | 4 ++-- .../torrent-repository-benchmarking/tests/repository/mod.rs | 4 +++- packages/udp-tracker-server/src/handlers/mod.rs | 2 +- 6 files changed, 12 insertions(+), 10 deletions(-) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 1775a3d72..ba0dd8c6e 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -47,7 +47,7 @@ impl Launcher { #[instrument(skip(self, http_tracker_container, tx_start, rx_halt))] fn start( &self, - http_tracker_container: Arc, + http_tracker_container: &Arc, tx_start: Sender, rx_halt: Receiver, ) -> BoxFuture<'static, ()> { @@ -69,7 +69,7 @@ impl Launcher { tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{address}"); - let app = router(http_tracker_container, service_binding.clone()); + let app = router(http_tracker_container, &service_binding); let running = Box::pin(async { match tls { @@ -176,7 +176,7 @@ impl HttpServer { let launcher = self.state.launcher; let task = tokio::spawn(async move { - let server = launcher.start(http_tracker_container, tx_start, rx_halt); + let server = launcher.start(&http_tracker_container, tx_start, rx_halt); server.await; diff --git a/packages/axum-http-tracker-server/src/v1/routes.rs 
b/packages/axum-http-tracker-server/src/v1/routes.rs index 3fe467a0d..df395cd9a 100644 --- a/packages/axum-http-tracker-server/src/v1/routes.rs +++ b/packages/axum-http-tracker-server/src/v1/routes.rs @@ -31,7 +31,7 @@ use crate::HTTP_TRACKER_LOG_TARGET; /// > **NOTICE**: it's added a layer to get the client IP from the connection /// > info. The tracker could use the connection info to get the client IP. #[instrument(skip(http_tracker_container, server_service_binding))] -pub fn router(http_tracker_container: Arc, server_service_binding: ServiceBinding) -> Router { +pub fn router(http_tracker_container: &Arc, server_service_binding: &ServiceBinding) -> Router { let server_socket_addr = server_service_binding.bind_address(); Router::new() diff --git a/packages/axum-rest-tracker-api-server/src/routes.rs b/packages/axum-rest-tracker-api-server/src/routes.rs index c18451c89..78b7818d9 100644 --- a/packages/axum-rest-tracker-api-server/src/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/routes.rs @@ -36,7 +36,7 @@ use crate::API_LOG_TARGET; /// Add all API routes to the router. 
#[instrument(skip(http_api_container, access_tokens))] pub fn router( - http_api_container: Arc, + http_api_container: &Arc, access_tokens: Arc, server_socket_addr: SocketAddr, ) -> Router { @@ -44,7 +44,7 @@ pub fn router( let api_url_prefix = "/api"; - let router = v1::routes::add(api_url_prefix, router, &http_api_container); + let router = v1::routes::add(api_url_prefix, router, http_api_container); let state = State { access_tokens }; diff --git a/packages/axum-rest-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs index 04c51d8fb..b358345fb 100644 --- a/packages/axum-rest-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -140,7 +140,7 @@ impl ApiServer { let task = tokio::spawn(async move { tracing::debug!(target: API_LOG_TARGET, "Starting with launcher in spawned task ..."); - let _task = launcher.start(http_api_container, access_tokens, tx_start, rx_halt).await; + let _task = launcher.start(&http_api_container, access_tokens, tx_start, rx_halt).await; tracing::debug!(target: API_LOG_TARGET, "Started with launcher in spawned task"); @@ -241,7 +241,7 @@ impl Launcher { #[instrument(skip(self, http_api_container, access_tokens, tx_start, rx_halt))] pub fn start( &self, - http_api_container: Arc, + http_api_container: &Arc, access_tokens: Arc, tx_start: Sender, rx_halt: Receiver, diff --git a/packages/torrent-repository-benchmarking/tests/repository/mod.rs b/packages/torrent-repository-benchmarking/tests/repository/mod.rs index e555654ca..c3589ce68 100644 --- a/packages/torrent-repository-benchmarking/tests/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/repository/mod.rs @@ -450,7 +450,9 @@ async fn it_should_import_persistent_torrents( make(&repo, &entries).await; let mut downloaded = repo.get_metrics().await.total_downloaded; - persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); + for d in persistent_torrents.values() { + downloaded 
+= u64::from(*d); + } repo.import_persistent(&persistent_torrents).await; diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 3c8204bf5..43c5bc4d5 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -28,7 +28,7 @@ use crate::event::UdpRequestKind; use crate::CurrentClock; #[derive(Debug, Clone, PartialEq)] -pub(super) struct CookieTimeValues { +pub struct CookieTimeValues { pub(super) issue_time: f64, pub(super) valid_range: Range, } From 64be8472feffb8217cdd7ce505510b4d234d5981 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jun 2025 14:16:02 +0100 Subject: [PATCH 156/247] feat: [#1446] add aggregate function sum to metric collection It allows sum metric samples matching a given criteria. The criteria is a label set. Sample values are added if they contain all the label name/value pairs specified in the criteria. For example, given these metric's samples in Prometheus export text format: ``` udp_tracker_server_requests_accepted_total{request_kind="scrape",server_binding_address_type="plain",server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 213118 udp_tracker_server_requests_accepted_total{request_kind="announce",server_binding_address_type="plain",server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 16460553 udp_tracker_server_requests_accepted_total{request_kind="connect",server_binding_address_type="plain",server_binding_ip="0.0.0.0",server_binding_port="6868",server_binding_protocol="udp"} 617 udp_tracker_server_requests_accepted_total{request_kind="connect",server_binding_address_type="plain",server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 17148137 ``` And the criteria: it should contain the label `request_kind` with the value `connect`. 
It should return: 617 + 17148137 = 17148754 --- .../src/statistics/metrics.rs | 2 +- packages/metrics/src/aggregate.rs | 28 ++ packages/metrics/src/counter.rs | 18 ++ packages/metrics/src/gauge.rs | 11 + packages/metrics/src/label/set.rs | 21 ++ packages/metrics/src/lib.rs | 1 + packages/metrics/src/metric/aggregate/mod.rs | 1 + packages/metrics/src/metric/aggregate/sum.rs | 283 ++++++++++++++++++ packages/metrics/src/metric/mod.rs | 1 + .../src/metric_collection/aggregate.rs | 112 +++++++ .../mod.rs} | 22 +- .../src/statistics/metrics.rs | 2 +- .../tracker-core/src/statistics/metrics.rs | 2 +- .../src/statistics/metrics.rs | 2 +- .../src/statistics/metrics.rs | 2 +- 15 files changed, 493 insertions(+), 15 deletions(-) create mode 100644 packages/metrics/src/aggregate.rs create mode 100644 packages/metrics/src/metric/aggregate/mod.rs create mode 100644 packages/metrics/src/metric/aggregate/sum.rs create mode 100644 packages/metrics/src/metric_collection/aggregate.rs rename packages/metrics/src/{metric_collection.rs => metric_collection/mod.rs} (98%) diff --git a/packages/http-tracker-core/src/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs index bf053b04e..650194d43 100644 --- a/packages/http-tracker-core/src/statistics/metrics.rs +++ b/packages/http-tracker-core/src/statistics/metrics.rs @@ -33,7 +33,7 @@ impl Metrics { labels: &LabelSet, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { - self.metric_collection.increase_counter(metric_name, labels, now) + self.metric_collection.increment_counter(metric_name, labels, now) } /// # Errors diff --git a/packages/metrics/src/aggregate.rs b/packages/metrics/src/aggregate.rs new file mode 100644 index 000000000..875360cd9 --- /dev/null +++ b/packages/metrics/src/aggregate.rs @@ -0,0 +1,28 @@ +use derive_more::Display; + +#[derive(Debug, Display, Clone, Copy, PartialEq)] +pub struct AggregateValue(f64); + +impl AggregateValue { + #[must_use] + pub fn new(value: f64) -> Self { + Self(value) 
+ } + + #[must_use] + pub fn value(&self) -> f64 { + self.0 + } +} + +impl From for AggregateValue { + fn from(value: f64) -> Self { + Self(value) + } +} + +impl From for f64 { + fn from(value: AggregateValue) -> Self { + value.0 + } +} diff --git a/packages/metrics/src/counter.rs b/packages/metrics/src/counter.rs index ac6d21836..3148ab4c3 100644 --- a/packages/metrics/src/counter.rs +++ b/packages/metrics/src/counter.rs @@ -17,6 +17,11 @@ impl Counter { self.0 } + #[must_use] + pub fn primitive(&self) -> u64 { + self.value() + } + pub fn increment(&mut self, value: u64) { self.0 += value; } @@ -26,12 +31,25 @@ impl Counter { } } +impl From for Counter { + fn from(value: u32) -> Self { + Self(u64::from(value)) + } +} + impl From for Counter { fn from(value: u64) -> Self { Self(value) } } +impl From for Counter { + fn from(value: i32) -> Self { + #[allow(clippy::cast_sign_loss)] + Self(value as u64) + } +} + impl From for u64 { fn from(counter: Counter) -> Self { counter.value() diff --git a/packages/metrics/src/gauge.rs b/packages/metrics/src/gauge.rs index 3f6089955..a2ef8135f 100644 --- a/packages/metrics/src/gauge.rs +++ b/packages/metrics/src/gauge.rs @@ -17,6 +17,11 @@ impl Gauge { self.0 } + #[must_use] + pub fn primitive(&self) -> f64 { + self.value() + } + pub fn set(&mut self, value: f64) { self.0 = value; } @@ -30,6 +35,12 @@ impl Gauge { } } +impl From for Gauge { + fn from(value: f32) -> Self { + Self(f64::from(value)) + } +} + impl From for Gauge { fn from(value: f64) -> Self { Self(value) diff --git a/packages/metrics/src/label/set.rs b/packages/metrics/src/label/set.rs index cab457f42..673f330c1 100644 --- a/packages/metrics/src/label/set.rs +++ b/packages/metrics/src/label/set.rs @@ -1,3 +1,4 @@ +use std::collections::btree_map::Iter; use std::collections::BTreeMap; use std::fmt::Display; @@ -12,6 +13,11 @@ pub struct LabelSet { } impl LabelSet { + #[must_use] + pub fn empty() -> Self { + Self { items: BTreeMap::new() } + } + /// Insert a new label 
pair or update the value of an existing label. pub fn upsert(&mut self, key: LabelName, value: LabelValue) { self.items.insert(key, value); @@ -20,6 +26,21 @@ impl LabelSet { pub fn is_empty(&self) -> bool { self.items.is_empty() } + + pub fn contains_pair(&self, name: &LabelName, value: &LabelValue) -> bool { + match self.items.get(name) { + Some(existing_value) => existing_value == value, + None => false, + } + } + + pub fn matches(&self, criteria: &LabelSet) -> bool { + criteria.iter().all(|(key, value)| self.contains_pair(key, value)) + } + + pub fn iter(&self) -> Iter<'_, LabelName, LabelValue> { + self.items.iter() + } } impl Display for LabelSet { diff --git a/packages/metrics/src/lib.rs b/packages/metrics/src/lib.rs index 997cd3c8c..c53e9dd02 100644 --- a/packages/metrics/src/lib.rs +++ b/packages/metrics/src/lib.rs @@ -1,3 +1,4 @@ +pub mod aggregate; pub mod counter; pub mod gauge; pub mod label; diff --git a/packages/metrics/src/metric/aggregate/mod.rs b/packages/metrics/src/metric/aggregate/mod.rs new file mode 100644 index 000000000..dce785d95 --- /dev/null +++ b/packages/metrics/src/metric/aggregate/mod.rs @@ -0,0 +1 @@ +pub mod sum; diff --git a/packages/metrics/src/metric/aggregate/sum.rs b/packages/metrics/src/metric/aggregate/sum.rs new file mode 100644 index 000000000..f08ea7d55 --- /dev/null +++ b/packages/metrics/src/metric/aggregate/sum.rs @@ -0,0 +1,283 @@ +use crate::aggregate::AggregateValue; +use crate::counter::Counter; +use crate::gauge::Gauge; +use crate::label::LabelSet; +use crate::metric::Metric; + +pub trait Sum { + fn sum(&self, label_set_criteria: &LabelSet) -> AggregateValue; +} + +impl Sum for Metric { + #[allow(clippy::cast_precision_loss)] + fn sum(&self, label_set_criteria: &LabelSet) -> AggregateValue { + let sum: f64 = self + .sample_collection + .iter() + .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) + .map(|(_label_set, measurement)| measurement.value().primitive() as f64) + .sum(); + + 
sum.into() + } +} + +impl Sum for Metric { + fn sum(&self, label_set_criteria: &LabelSet) -> AggregateValue { + let sum: f64 = self + .sample_collection + .iter() + .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) + .map(|(_label_set, measurement)| measurement.value().primitive()) + .sum(); + + sum.into() + } +} + +#[cfg(test)] +mod tests { + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::aggregate::AggregateValue; + use crate::counter::Counter; + use crate::gauge::Gauge; + use crate::label::LabelSet; + use crate::metric::aggregate::sum::Sum; + use crate::metric::{Metric, MetricName}; + use crate::metric_name; + use crate::sample::Sample; + use crate::sample_collection::SampleCollection; + + struct MetricBuilder { + sample_time: DurationSinceUnixEpoch, + name: MetricName, + samples: Vec>, + } + + impl Default for MetricBuilder { + fn default() -> Self { + Self { + sample_time: DurationSinceUnixEpoch::from_secs(1_743_552_000), + name: metric_name!("test_metric"), + samples: vec![], + } + } + } + + impl MetricBuilder { + fn with_sample(mut self, value: T, label_set: &LabelSet) -> Self { + let sample = Sample::new(value, self.sample_time, label_set.clone()); + self.samples.push(sample); + self + } + + fn build(self) -> Metric { + Metric::new( + self.name, + None, + None, + SampleCollection::new(self.samples).expect("invalid samples"), + ) + } + } + + fn counter_cases() -> Vec<(Metric, LabelSet, AggregateValue)> { + // (metric, label set criteria, expected_aggregate_value) + vec![ + // Metric with one sample without label set + ( + MetricBuilder::default().with_sample(1.into(), &LabelSet::empty()).build(), + LabelSet::empty(), + 1.0.into(), + ), + // Metric with one sample with a label set + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0.into(), + ), + // Metric with two samples, different label sets, sum all + ( + 
MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .with_sample(2.into(), &[("l2", "l2_value")].into()) + .build(), + LabelSet::empty(), + 3.0.into(), + ), + // Metric with two samples, different label sets, sum one + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .with_sample(2.into(), &[("l2", "l2_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0.into(), + ), + // Metric with two samples, same label key, different label values, sum by key + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(2.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 3.0.into(), + ), + // Metric with two samples, different label values, sum by subkey + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(2.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("la", "la_value")].into(), + 1.0.into(), + ), + // Edge: Metric with no samples at all + (MetricBuilder::default().build(), LabelSet::empty(), 0.0.into()), + // Edge: Metric with samples but no matching labels + ( + MetricBuilder::default() + .with_sample(5.into(), &[("foo", "bar")].into()) + .build(), + [("not", "present")].into(), + 0.0.into(), + ), + // Edge: Metric with zero value + ( + MetricBuilder::default() + .with_sample(0.into(), &[("l3", "l3_value")].into()) + .build(), + [("l3", "l3_value")].into(), + 0.0.into(), + ), + // Edge: Metric with a very large value + ( + MetricBuilder::default() + .with_sample(u64::MAX.into(), &LabelSet::empty()) + .build(), + LabelSet::empty(), + #[allow(clippy::cast_precision_loss)] + (u64::MAX as f64).into(), + ), + ] + } + + fn gauge_cases() -> Vec<(Metric, LabelSet, AggregateValue)> { + // (metric, label set criteria, expected_aggregate_value) + vec![ + // Metric with one sample without label set + ( + 
MetricBuilder::default().with_sample(1.0.into(), &LabelSet::empty()).build(), + LabelSet::empty(), + 1.0.into(), + ), + // Metric with one sample with a label set + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0.into(), + ), + // Metric with two samples, different label sets, sum all + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .with_sample(2.0.into(), &[("l2", "l2_value")].into()) + .build(), + LabelSet::empty(), + 3.0.into(), + ), + // Metric with two samples, different label sets, sum one + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .with_sample(2.0.into(), &[("l2", "l2_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0.into(), + ), + // Metric with two samples, same label key, different label values, sum by key + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(2.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 3.0.into(), + ), + // Metric with two samples, different label values, sum by subkey + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(2.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("la", "la_value")].into(), + 1.0.into(), + ), + // Edge: Metric with no samples at all + (MetricBuilder::default().build(), LabelSet::empty(), 0.0.into()), + // Edge: Metric with samples but no matching labels + ( + MetricBuilder::default() + .with_sample(5.0.into(), &[("foo", "bar")].into()) + .build(), + [("not", "present")].into(), + 0.0.into(), + ), + // Edge: Metric with zero value + ( + MetricBuilder::default() + .with_sample(0.0.into(), &[("l3", "l3_value")].into()) + .build(), + [("l3", "l3_value")].into(), + 0.0.into(), + ), + // Edge: Metric with 
negative values + ( + MetricBuilder::default() + .with_sample((-2.0).into(), &[("l4", "l4_value")].into()) + .with_sample(3.0.into(), &[("l5", "l5_value")].into()) + .build(), + LabelSet::empty(), + 1.0.into(), + ), + // Edge: Metric with a very large value + ( + MetricBuilder::default() + .with_sample(f64::MAX.into(), &LabelSet::empty()) + .build(), + LabelSet::empty(), + f64::MAX.into(), + ), + ] + } + + #[test] + fn test_counter_cases() { + for (idx, (metric, criteria, expected_value)) in counter_cases().iter().enumerate() { + let sum = metric.sum(criteria); + + assert_eq!( + sum, *expected_value, + "at case {idx}, expected sum to be {expected_value}, got {sum}" + ); + } + } + + #[test] + fn test_gauge_cases() { + for (idx, (metric, criteria, expected_value)) in gauge_cases().iter().enumerate() { + let sum = metric.sum(criteria); + + assert_eq!( + sum, *expected_value, + "at case {idx}, expected sum to be {expected_value}, got {sum}" + ); + } + } +} diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index df743c519..8ee24493a 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -1,3 +1,4 @@ +pub mod aggregate; pub mod description; pub mod name; diff --git a/packages/metrics/src/metric_collection/aggregate.rs b/packages/metrics/src/metric_collection/aggregate.rs new file mode 100644 index 000000000..7fd744d92 --- /dev/null +++ b/packages/metrics/src/metric_collection/aggregate.rs @@ -0,0 +1,112 @@ +use crate::aggregate::AggregateValue; +use crate::counter::Counter; +use crate::gauge::Gauge; +use crate::label::LabelSet; +use crate::metric::aggregate::sum::Sum as MetricSumTrait; +use crate::metric::MetricName; +use crate::metric_collection::{MetricCollection, MetricKindCollection}; + +pub trait Sum { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option; +} + +impl Sum for MetricCollection { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) 
-> Option { + if let Some(value) = self.counters.sum(metric_name, label_set_criteria) { + return Some(value); + } + + self.gauges.sum(metric_name, label_set_criteria) + } +} + +impl Sum for MetricKindCollection { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { + self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) + } +} + +impl Sum for MetricKindCollection { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { + self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) + } +} + +#[cfg(test)] +mod tests { + + mod it_should_allow_summing_all_metric_samples_containing_some_given_labels { + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::label::LabelValue; + use crate::label_name; + use crate::metric_collection::aggregate::Sum; + + #[test] + fn type_counter_with_two_samples() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_counter"); + + let mut collection = MetricCollection::default(); + + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + assert_eq!(collection.sum(&metric_name, &LabelSet::empty()), Some(2.0.into())); + assert_eq!( + collection.sum(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(1.0.into()) + ); + } + + #[test] + fn type_gauge_with_two_samples() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_gauge"); + + let mut collection = MetricCollection::default(); + + 
collection + .increment_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + collection + .increment_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + assert_eq!(collection.sum(&metric_name, &LabelSet::empty()), Some(2.0.into())); + assert_eq!( + collection.sum(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(1.0.into()) + ); + } + } +} diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection/mod.rs similarity index 98% rename from packages/metrics/src/metric_collection.rs rename to packages/metrics/src/metric_collection/mod.rs index ff932caae..e183236aa 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection/mod.rs @@ -1,3 +1,5 @@ +pub mod aggregate; + use std::collections::{HashMap, HashSet}; use serde::ser::{SerializeSeq, Serializer}; @@ -103,7 +105,7 @@ impl MetricCollection { /// /// Return an error if a metrics of a different type with the same name /// already exists. 
- pub fn increase_counter( + pub fn increment_counter( &mut self, name: &MetricName, label_set: &LabelSet, @@ -669,7 +671,7 @@ udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip // First create a counter collection - .increase_counter(&metric_name!("test_metric"), &label_set, time) + .increment_counter(&metric_name!("test_metric"), &label_set, time) .unwrap(); // Then try to create a gauge with the same name @@ -690,7 +692,7 @@ udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip .unwrap(); // Then try to create a counter with the same name - let result = collection.increase_counter(&metric_name!("test_metric"), &label_set, time); + let result = collection.increment_counter(&metric_name!("test_metric"), &label_set, time); assert!(result.is_err()); } @@ -803,7 +805,7 @@ http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",s let mut collection1 = MetricCollection::default(); collection1 - .increase_counter(&metric_name!("test_counter"), &label_set, time) + .increment_counter(&metric_name!("test_counter"), &label_set, time) .unwrap(); let mut collection2 = MetricCollection::default(); @@ -824,12 +826,12 @@ http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",s let mut collection1 = MetricCollection::default(); collection1 - .increase_counter(&metric_name!("test_metric"), &label_set, time) + .increment_counter(&metric_name!("test_metric"), &label_set, time) .unwrap(); let mut collection2 = MetricCollection::default(); collection2 - .increase_counter(&metric_name!("test_metric"), &label_set, time) + .increment_counter(&metric_name!("test_metric"), &label_set, time) .unwrap(); let result = collection1.merge(&collection2); @@ -843,7 +845,7 @@ http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",s let mut collection1 = MetricCollection::default(); collection1 - .increase_counter(&metric_name!("test_metric"), &label_set, time) + 
.increment_counter(&metric_name!("test_metric"), &label_set, time) .unwrap(); let mut collection2 = MetricCollection::default(); @@ -940,7 +942,7 @@ http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",s let mut collection = collection_with_one_counter(&metric_name, &label_set, Counter::new(0)); collection - .increase_counter(&metric_name!("test_counter"), &label_set, time) + .increment_counter(&metric_name!("test_counter"), &label_set, time) .unwrap(); assert_eq!( @@ -958,10 +960,10 @@ http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",s MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); metric_collection - .increase_counter(&metric_name!("test_counter"), &label_set, time) + .increment_counter(&metric_name!("test_counter"), &label_set, time) .unwrap(); metric_collection - .increase_counter(&metric_name!("test_counter"), &label_set, time) + .increment_counter(&metric_name!("test_counter"), &label_set, time) .unwrap(); assert_eq!( diff --git a/packages/swarm-coordination-registry/src/statistics/metrics.rs b/packages/swarm-coordination-registry/src/statistics/metrics.rs index f8ab3f9d9..d62a1ba6e 100644 --- a/packages/swarm-coordination-registry/src/statistics/metrics.rs +++ b/packages/swarm-coordination-registry/src/statistics/metrics.rs @@ -21,7 +21,7 @@ impl Metrics { labels: &LabelSet, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { - self.metric_collection.increase_counter(metric_name, labels, now) + self.metric_collection.increment_counter(metric_name, labels, now) } /// # Errors diff --git a/packages/tracker-core/src/statistics/metrics.rs b/packages/tracker-core/src/statistics/metrics.rs index 02cc51499..a5caaf1cf 100644 --- a/packages/tracker-core/src/statistics/metrics.rs +++ b/packages/tracker-core/src/statistics/metrics.rs @@ -21,7 +21,7 @@ impl Metrics { labels: &LabelSet, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { - 
self.metric_collection.increase_counter(metric_name, labels, now) + self.metric_collection.increment_counter(metric_name, labels, now) } /// # Errors diff --git a/packages/udp-tracker-core/src/statistics/metrics.rs b/packages/udp-tracker-core/src/statistics/metrics.rs index 94aa7d08f..e6ff8d5f6 100644 --- a/packages/udp-tracker-core/src/statistics/metrics.rs +++ b/packages/udp-tracker-core/src/statistics/metrics.rs @@ -47,7 +47,7 @@ impl Metrics { labels: &LabelSet, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { - self.metric_collection.increase_counter(metric_name, labels, now) + self.metric_collection.increment_counter(metric_name, labels, now) } /// # Errors diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index 7b18f6418..ac6250872 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -78,7 +78,7 @@ impl Metrics { labels: &LabelSet, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { - self.metric_collection.increase_counter(metric_name, labels, now) + self.metric_collection.increment_counter(metric_name, labels, now) } /// # Errors From 4da4f8351c1e616421bf0e8b5b83b1926fe34cd4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 12 Jun 2025 08:49:47 +0100 Subject: [PATCH 157/247] refactor: [#1446] rename vars --- packages/metrics/src/label/set.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/metrics/src/label/set.rs b/packages/metrics/src/label/set.rs index 673f330c1..542f5d2e6 100644 --- a/packages/metrics/src/label/set.rs +++ b/packages/metrics/src/label/set.rs @@ -19,8 +19,8 @@ impl LabelSet { } /// Insert a new label pair or update the value of an existing label. 
- pub fn upsert(&mut self, key: LabelName, value: LabelValue) { - self.items.insert(key, value); + pub fn upsert(&mut self, name: LabelName, value: LabelValue) { + self.items.insert(name, value); } pub fn is_empty(&self) -> bool { @@ -35,7 +35,7 @@ impl LabelSet { } pub fn matches(&self, criteria: &LabelSet) -> bool { - criteria.iter().all(|(key, value)| self.contains_pair(key, value)) + criteria.iter().all(|(name, value)| self.contains_pair(name, value)) } pub fn iter(&self) -> Iter<'_, LabelName, LabelValue> { @@ -48,7 +48,7 @@ impl Display for LabelSet { let items = self .items .iter() - .map(|(key, value)| format!("{key}=\"{value}\"")) + .map(|(name, value)| format!("{name}=\"{value}\"")) .collect::>() .join(","); @@ -90,8 +90,8 @@ impl From> for LabelSet { fn from(vec: Vec) -> Self { let mut items = BTreeMap::new(); - for (key, value) in vec { - items.insert(key, value); + for (name, value) in vec { + items.insert(name, value); } Self { items } @@ -160,8 +160,8 @@ impl Serialize for LabelSet { { self.items .iter() - .map(|(key, value)| SerializedLabel { - name: key.clone(), + .map(|(name, value)| SerializedLabel { + name: name.clone(), value: value.clone(), }) .collect::>() From 0d134396e53c9fe75becaad8f03072a0e53d3a22 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 12 Jun 2025 09:50:24 +0100 Subject: [PATCH 158/247] test: [#1446] add more tests to metrics package --- packages/metrics/.gitignore | 2 +- packages/metrics/README.md | 22 +++ packages/metrics/cSpell.json | 19 +++ packages/metrics/src/aggregate.rs | 115 ++++++++++++++++ packages/metrics/src/counter.rs | 156 ++++++++++++++++++++++ packages/metrics/src/gauge.rs | 124 +++++++++++++++++ packages/metrics/src/label/set.rs | 112 +++++++++++++++- packages/metrics/src/metric/mod.rs | 29 ++++ packages/metrics/src/sample.rs | 18 +++ packages/metrics/src/sample_collection.rs | 50 +++++++ 10 files changed, 645 insertions(+), 2 deletions(-) create mode 100644 packages/metrics/cSpell.json diff --git 
a/packages/metrics/.gitignore b/packages/metrics/.gitignore index 0b1372e5c..6350e9868 100644 --- a/packages/metrics/.gitignore +++ b/packages/metrics/.gitignore @@ -1 +1 @@ -./.coverage +.coverage diff --git a/packages/metrics/README.md b/packages/metrics/README.md index 627640eec..885d6fa45 100644 --- a/packages/metrics/README.md +++ b/packages/metrics/README.md @@ -6,6 +6,28 @@ A library with the metrics types used by the [Torrust Tracker](https://github.co [Crate documentation](https://docs.rs/torrust-tracker-metrics). +## Testing + +Run coverage report: + +```console +cargo llvm-cov --package torrust-tracker-metrics +``` + +Generate LCOV report with `llvm-cov` (for Visual Studio Code extension): + +```console +mkdir -p ./.coverage +cargo llvm-cov --package torrust-tracker-metrics --lcov --output-path=./.coverage/lcov.info +``` + +Generate HTML report with `llvm-cov`: + +```console +mkdir -p ./.coverage +cargo llvm-cov --package torrust-tracker-metrics --html --output-dir ./.coverage +``` + ## Acknowledgements We copied some parts like units or function names and signatures from the crate [metrics](https://crates.io/crates/metrics) because we wanted to make it compatible as much as possible with it. In the future, we may consider using the `metrics` crate directly instead of maintaining our own version. 
diff --git a/packages/metrics/cSpell.json b/packages/metrics/cSpell.json new file mode 100644 index 000000000..1a2c13d2e --- /dev/null +++ b/packages/metrics/cSpell.json @@ -0,0 +1,19 @@ +{ + "words": [ + "cloneable", + "formatjson", + "Gibibytes", + "Kibibytes", + "Mebibytes", + "ñaca", + "rstest", + "subsec", + "Tebibytes", + "thiserror" + ], + "enableFiletypes": [ + "dockerfile", + "shellscript", + "toml" + ] +} diff --git a/packages/metrics/src/aggregate.rs b/packages/metrics/src/aggregate.rs index 875360cd9..e480be396 100644 --- a/packages/metrics/src/aggregate.rs +++ b/packages/metrics/src/aggregate.rs @@ -26,3 +26,118 @@ impl From for f64 { value.0 } } + +#[cfg(test)] +mod tests { + use approx::assert_relative_eq; + + use super::*; + + #[test] + fn it_should_be_created_with_new() { + let value = AggregateValue::new(42.5); + assert_relative_eq!(value.value(), 42.5); + } + + #[test] + fn it_should_return_the_inner_value() { + let value = AggregateValue::new(123.456); + assert_relative_eq!(value.value(), 123.456); + } + + #[test] + fn it_should_handle_zero_value() { + let value = AggregateValue::new(0.0); + assert_relative_eq!(value.value(), 0.0); + } + + #[test] + fn it_should_handle_negative_values() { + let value = AggregateValue::new(-42.5); + assert_relative_eq!(value.value(), -42.5); + } + + #[test] + fn it_should_handle_infinity() { + let value = AggregateValue::new(f64::INFINITY); + assert_relative_eq!(value.value(), f64::INFINITY); + } + + #[test] + fn it_should_handle_nan() { + let value = AggregateValue::new(f64::NAN); + assert!(value.value().is_nan()); + } + + #[test] + fn it_should_be_created_from_f64() { + let value: AggregateValue = 42.5.into(); + assert_relative_eq!(value.value(), 42.5); + } + + #[test] + fn it_should_convert_to_f64() { + let value = AggregateValue::new(42.5); + let f64_value: f64 = value.into(); + assert_relative_eq!(f64_value, 42.5); + } + + #[test] + fn it_should_be_displayable() { + let value = AggregateValue::new(42.5); + 
assert_eq!(value.to_string(), "42.5"); + } + + #[test] + fn it_should_be_debuggable() { + let value = AggregateValue::new(42.5); + let debug_string = format!("{value:?}"); + assert_eq!(debug_string, "AggregateValue(42.5)"); + } + + #[test] + fn it_should_be_cloneable() { + let value = AggregateValue::new(42.5); + let cloned_value = value; + assert_eq!(value, cloned_value); + } + + #[test] + fn it_should_be_copyable() { + let value = AggregateValue::new(42.5); + let copied_value = value; + assert_eq!(value, copied_value); + } + + #[test] + fn it_should_support_equality_comparison() { + let value1 = AggregateValue::new(42.5); + let value2 = AggregateValue::new(42.5); + let value3 = AggregateValue::new(43.0); + + assert_eq!(value1, value2); + assert_ne!(value1, value3); + } + + #[test] + fn it_should_handle_special_float_values_in_equality() { + let nan1 = AggregateValue::new(f64::NAN); + let nan2 = AggregateValue::new(f64::NAN); + let infinity = AggregateValue::new(f64::INFINITY); + let neg_infinity = AggregateValue::new(f64::NEG_INFINITY); + + // NaN is not equal to itself in IEEE 754 + assert_ne!(nan1, nan2); + assert_eq!(infinity, AggregateValue::new(f64::INFINITY)); + assert_eq!(neg_infinity, AggregateValue::new(f64::NEG_INFINITY)); + assert_ne!(infinity, neg_infinity); + } + + #[test] + fn it_should_handle_conversion_roundtrip() { + let original_value = 42.5; + let aggregate_value = AggregateValue::from(original_value); + let converted_back: f64 = aggregate_value.into(); + assert_relative_eq!(original_value, converted_back); + } +} diff --git a/packages/metrics/src/counter.rs b/packages/metrics/src/counter.rs index 3148ab4c3..0e2002181 100644 --- a/packages/metrics/src/counter.rs +++ b/packages/metrics/src/counter.rs @@ -107,4 +107,160 @@ mod tests { let counter = Counter::new(42); assert_eq!(counter.to_prometheus(), "42"); } + + #[test] + fn it_could_be_converted_from_u32() { + let counter: Counter = 42u32.into(); + assert_eq!(counter.value(), 42); + } + + 
#[test] + fn it_could_be_converted_from_i32() { + let counter: Counter = 42i32.into(); + assert_eq!(counter.value(), 42); + } + + #[test] + fn it_should_return_primitive_value() { + let counter = Counter::new(123); + assert_eq!(counter.primitive(), 123); + } + + #[test] + fn it_should_handle_zero_value() { + let counter = Counter::new(0); + assert_eq!(counter.value(), 0); + assert_eq!(counter.primitive(), 0); + } + + #[test] + fn it_should_handle_large_values() { + let counter = Counter::new(u64::MAX); + assert_eq!(counter.value(), u64::MAX); + } + + #[test] + fn it_should_handle_u32_max_conversion() { + let counter: Counter = u32::MAX.into(); + assert_eq!(counter.value(), u64::from(u32::MAX)); + } + + #[test] + fn it_should_handle_i32_max_conversion() { + let counter: Counter = i32::MAX.into(); + assert_eq!(counter.value(), i32::MAX as u64); + } + + #[test] + fn it_should_handle_negative_i32_conversion() { + let counter: Counter = (-42i32).into(); + #[allow(clippy::cast_sign_loss)] + let expected = (-42i32) as u64; + assert_eq!(counter.value(), expected); + } + + #[test] + fn it_should_handle_i32_min_conversion() { + let counter: Counter = i32::MIN.into(); + #[allow(clippy::cast_sign_loss)] + let expected = i32::MIN as u64; + assert_eq!(counter.value(), expected); + } + + #[test] + fn it_should_handle_large_increments() { + let mut counter = Counter::new(100); + counter.increment(1000); + assert_eq!(counter.value(), 1100); + + counter.increment(u64::MAX - 1100); + assert_eq!(counter.value(), u64::MAX); + } + + #[test] + fn it_should_support_multiple_absolute_operations() { + let mut counter = Counter::new(0); + + counter.absolute(100); + assert_eq!(counter.value(), 100); + + counter.absolute(50); + assert_eq!(counter.value(), 50); + + counter.absolute(0); + assert_eq!(counter.value(), 0); + } + + #[test] + fn it_should_be_displayable() { + let counter = Counter::new(42); + assert_eq!(counter.to_string(), "42"); + + let counter = Counter::new(0); + 
assert_eq!(counter.to_string(), "0"); + } + + #[test] + fn it_should_be_debuggable() { + let counter = Counter::new(42); + let debug_string = format!("{counter:?}"); + assert_eq!(debug_string, "Counter(42)"); + } + + #[test] + fn it_should_be_cloneable() { + let counter = Counter::new(42); + let cloned_counter = counter.clone(); + assert_eq!(counter, cloned_counter); + assert_eq!(counter.value(), cloned_counter.value()); + } + + #[test] + fn it_should_support_equality_comparison() { + let counter1 = Counter::new(42); + let counter2 = Counter::new(42); + let counter3 = Counter::new(43); + + assert_eq!(counter1, counter2); + assert_ne!(counter1, counter3); + } + + #[test] + fn it_should_have_default_value() { + let counter = Counter::default(); + assert_eq!(counter.value(), 0); + } + + #[test] + fn it_should_handle_conversion_roundtrip() { + let original_value = 12345u64; + let counter = Counter::from(original_value); + let converted_back: u64 = counter.into(); + assert_eq!(original_value, converted_back); + } + + #[test] + fn it_should_handle_u32_conversion_roundtrip() { + let original_value = 12345u32; + let counter = Counter::from(original_value); + assert_eq!(counter.value(), u64::from(original_value)); + } + + #[test] + fn it_should_handle_i32_conversion_roundtrip() { + let original_value = 12345i32; + let counter = Counter::from(original_value); + #[allow(clippy::cast_sign_loss)] + let expected = original_value as u64; + assert_eq!(counter.value(), expected); + } + + #[test] + fn it_should_serialize_large_values_to_prometheus() { + let counter = Counter::new(u64::MAX); + assert_eq!(counter.to_prometheus(), u64::MAX.to_string()); + + let counter = Counter::new(0); + assert_eq!(counter.to_prometheus(), "0"); + } } diff --git a/packages/metrics/src/gauge.rs b/packages/metrics/src/gauge.rs index a2ef8135f..d0883715b 100644 --- a/packages/metrics/src/gauge.rs +++ b/packages/metrics/src/gauge.rs @@ -113,4 +113,128 @@ mod tests { let counter = Gauge::new(42.1); 
assert_eq!(counter.to_prometheus(), "42.1"); } + + #[test] + fn it_could_be_converted_from_f32() { + let gauge: Gauge = 42.5f32.into(); + assert_relative_eq!(gauge.value(), 42.5); + } + + #[test] + fn it_should_return_primitive_value() { + let gauge = Gauge::new(123.456); + assert_relative_eq!(gauge.primitive(), 123.456); + } + + #[test] + fn it_should_handle_zero_value() { + let gauge = Gauge::new(0.0); + assert_relative_eq!(gauge.value(), 0.0); + assert_relative_eq!(gauge.primitive(), 0.0); + } + + #[test] + fn it_should_handle_negative_values() { + let gauge = Gauge::new(-42.5); + assert_relative_eq!(gauge.value(), -42.5); + } + + #[test] + fn it_should_handle_large_values() { + let gauge = Gauge::new(f64::MAX); + assert_relative_eq!(gauge.value(), f64::MAX); + } + + #[test] + fn it_should_handle_infinity() { + let gauge = Gauge::new(f64::INFINITY); + assert_relative_eq!(gauge.value(), f64::INFINITY); + } + + #[test] + fn it_should_handle_nan() { + let gauge = Gauge::new(f64::NAN); + assert!(gauge.value().is_nan()); + } + + #[test] + fn it_should_be_displayable() { + let gauge = Gauge::new(42.5); + assert_eq!(gauge.to_string(), "42.5"); + + let gauge = Gauge::new(0.0); + assert_eq!(gauge.to_string(), "0"); + } + + #[test] + fn it_should_be_debuggable() { + let gauge = Gauge::new(42.5); + let debug_string = format!("{gauge:?}"); + assert_eq!(debug_string, "Gauge(42.5)"); + } + + #[test] + fn it_should_be_cloneable() { + let gauge = Gauge::new(42.5); + let cloned_gauge = gauge.clone(); + assert_eq!(gauge, cloned_gauge); + assert_relative_eq!(gauge.value(), cloned_gauge.value()); + } + + #[test] + fn it_should_support_equality_comparison() { + let gauge1 = Gauge::new(42.5); + let gauge2 = Gauge::new(42.5); + let gauge3 = Gauge::new(43.0); + + assert_eq!(gauge1, gauge2); + assert_ne!(gauge1, gauge3); + } + + #[test] + fn it_should_have_default_value() { + let gauge = Gauge::default(); + assert_relative_eq!(gauge.value(), 0.0); + } + + #[test] + fn 
it_should_handle_conversion_roundtrip() { + let original_value = 12345.678; + let gauge = Gauge::from(original_value); + let converted_back: f64 = gauge.into(); + assert_relative_eq!(original_value, converted_back); + } + + #[test] + fn it_should_handle_f32_conversion_roundtrip() { + let original_value = 12345.5f32; + let gauge = Gauge::from(original_value); + assert_relative_eq!(gauge.value(), f64::from(original_value)); + } + + #[test] + fn it_should_handle_multiple_operations() { + let mut gauge = Gauge::new(100.0); + + gauge.increment(50.0); + assert_relative_eq!(gauge.value(), 150.0); + + gauge.decrement(25.0); + assert_relative_eq!(gauge.value(), 125.0); + + gauge.set(200.0); + assert_relative_eq!(gauge.value(), 200.0); + } + + #[test] + fn it_should_serialize_special_values_to_prometheus() { + let gauge = Gauge::new(f64::INFINITY); + assert_eq!(gauge.to_prometheus(), "inf"); + + let gauge = Gauge::new(f64::NEG_INFINITY); + assert_eq!(gauge.to_prometheus(), "-inf"); + + let gauge = Gauge::new(f64::NAN); + assert_eq!(gauge.to_prometheus(), "NaN"); + } } diff --git a/packages/metrics/src/label/set.rs b/packages/metrics/src/label/set.rs index 542f5d2e6..46256e4d5 100644 --- a/packages/metrics/src/label/set.rs +++ b/packages/metrics/src/label/set.rs @@ -297,10 +297,18 @@ mod tests { #[test] fn it_should_allow_serializing_to_prometheus_format() { let label_set = LabelSet::from((label_name!("label_name"), LabelValue::new("label value"))); - assert_eq!(label_set.to_prometheus(), r#"{label_name="label value"}"#); } + #[test] + fn it_should_handle_prometheus_format_with_special_characters() { + let label_set: LabelSet = vec![("label_with_underscores", "value_with_underscores")].into(); + assert_eq!( + label_set.to_prometheus(), + r#"{label_with_underscores="value_with_underscores"}"# + ); + } + #[test] fn it_should_alphabetically_order_labels_in_prometheus_format() { let label_set = LabelSet::from([ @@ -471,4 +479,106 @@ mod tests { let a: LabelSet = 
(label_name!("x"), LabelValue::new("1")).into(); let _unused = a.clone(); } + + #[test] + fn it_should_check_if_empty() { + let empty_set = LabelSet::empty(); + assert!(empty_set.is_empty()); + } + + #[test] + fn it_should_check_if_non_empty() { + let non_empty_set: LabelSet = (label_name!("label"), LabelValue::new("value")).into(); + assert!(!non_empty_set.is_empty()); + } + + #[test] + fn it_should_create_an_empty_label_set() { + let empty_set = LabelSet::empty(); + assert!(empty_set.is_empty()); + } + + #[test] + fn it_should_check_if_contains_specific_label_pair() { + let label_set: LabelSet = vec![("service", "tracker"), ("protocol", "http")].into(); + + // Test existing pair + assert!(label_set.contains_pair(&LabelName::new("service"), &LabelValue::new("tracker"))); + assert!(label_set.contains_pair(&LabelName::new("protocol"), &LabelValue::new("http"))); + + // Test non-existing name + assert!(!label_set.contains_pair(&LabelName::new("missing"), &LabelValue::new("value"))); + + // Test existing name with wrong value + assert!(!label_set.contains_pair(&LabelName::new("service"), &LabelValue::new("wrong"))); + } + + #[test] + fn it_should_match_against_criteria() { + let label_set: LabelSet = vec![("service", "tracker"), ("protocol", "http"), ("version", "v1")].into(); + + // Empty criteria should match any label set + assert!(label_set.matches(&LabelSet::empty())); + + // Single matching criterion + let single_criteria: LabelSet = vec![("service", "tracker")].into(); + assert!(label_set.matches(&single_criteria)); + + // Multiple matching criteria + let multiple_criteria: LabelSet = vec![("service", "tracker"), ("protocol", "http")].into(); + assert!(label_set.matches(&multiple_criteria)); + + // Non-matching criterion + let non_matching: LabelSet = vec![("service", "wrong")].into(); + assert!(!label_set.matches(&non_matching)); + + // Partially matching criteria (one matches, one doesn't) + let partial_matching: LabelSet = vec![("service", "tracker"), 
("missing", "value")].into(); + assert!(!label_set.matches(&partial_matching)); + + // Criteria with label not in original set + let missing_label: LabelSet = vec![("missing_label", "value")].into(); + assert!(!label_set.matches(&missing_label)); + } + + #[test] + fn it_should_allow_iteration_over_label_pairs() { + let label_set: LabelSet = vec![("service", "tracker"), ("protocol", "http")].into(); + + let mut count = 0; + + for (name, value) in label_set.iter() { + count += 1; + // Verify we can access name and value + assert!(!name.to_string().is_empty()); + assert!(!value.to_string().is_empty()); + } + + assert_eq!(count, 2); + } + + #[test] + fn it_should_display_empty_label_set() { + let empty_set = LabelSet::empty(); + assert_eq!(empty_set.to_string(), "{}"); + } + + #[test] + fn it_should_serialize_empty_label_set_to_prometheus_format() { + let empty_set = LabelSet::empty(); + assert_eq!(empty_set.to_prometheus(), ""); + } + + #[test] + fn it_should_maintain_order_in_iteration() { + let label_set: LabelSet = vec![("z_label", "z_value"), ("a_label", "a_value"), ("m_label", "m_value")].into(); + + let mut labels: Vec = vec![]; + for (name, _) in label_set.iter() { + labels.push(name.to_string()); + } + + // Should be in alphabetical order + assert_eq!(labels, vec!["a_label", "m_label", "z_label"]); + } } diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 8ee24493a..d1aa01b94 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -322,4 +322,33 @@ mod tests { assert_relative_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1.0); } } + + mod for_prometheus_serialization { + use super::super::*; + use crate::counter::Counter; + use crate::metric_name; + + #[test] + fn it_should_return_empty_string_for_prometheus_help_line_when_description_is_none() { + let name = metric_name!("test_metric"); + let samples = SampleCollection::::default(); + let metric = Metric::::new(name, 
None, None, samples); + + let help_line = metric.prometheus_help_line(); + + assert_eq!(help_line, String::new()); + } + + #[test] + fn it_should_return_formatted_help_line_for_prometheus_when_description_is_some() { + let name = metric_name!("test_metric"); + let description = MetricDescription::new("This is a test metric description"); + let samples = SampleCollection::::default(); + let metric = Metric::::new(name, None, Some(description), samples); + + let help_line = metric.prometheus_help_line(); + + assert_eq!(help_line, "# HELP test_metric This is a test metric description"); + } + } } diff --git a/packages/metrics/src/sample.rs b/packages/metrics/src/sample.rs index b9cd6c312..63f46b9b8 100644 --- a/packages/metrics/src/sample.rs +++ b/packages/metrics/src/sample.rs @@ -279,6 +279,15 @@ mod tests { assert_eq!(sample.to_prometheus(), r#"{label_name="label_value",method="GET"} 42"#); } + + #[test] + fn it_should_allow_exporting_to_prometheus_format_with_empty_label_set() { + let counter = Counter::new(42); + + let sample = Sample::new(counter, DurationSinceUnixEpoch::default(), LabelSet::default()); + + assert_eq!(sample.to_prometheus(), " 42"); + } } mod for_gauge_type_sample { use torrust_tracker_primitives::DurationSinceUnixEpoch; @@ -347,6 +356,15 @@ mod tests { assert_eq!(sample.to_prometheus(), r#"{label_name="label_value",method="GET"} 42"#); } + + #[test] + fn it_should_allow_exporting_to_prometheus_format_with_empty_label_set() { + let gauge = Gauge::new(42.0); + + let sample = Sample::new(gauge, DurationSinceUnixEpoch::default(), LabelSet::default()); + + assert_eq!(sample.to_prometheus(), " 42"); + } } mod serialization_to_json { diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs index ef88b27dd..e520d7310 100644 --- a/packages/metrics/src/sample_collection.rs +++ b/packages/metrics/src/sample_collection.rs @@ -386,6 +386,56 @@ mod tests { assert_eq!(collection.get(&label2).unwrap().value(), 
&Counter::new(1)); assert_eq!(collection.len(), 2); } + + #[test] + fn it_should_allow_setting_absolute_value_for_a_counter() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::::default(); + + // Set absolute value for a non-existent label + collection.absolute(&label_set, 42, sample_update_time()); + + // Verify the label exists and has the absolute value + assert!(collection.get(&label_set).is_some()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Counter::new(42)); + } + + #[test] + fn it_should_allow_setting_absolute_value_for_existing_counter() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::::default(); + + // Initialize the sample with increment + collection.increment(&label_set, sample_update_time()); + + // Verify initial state + let sample = collection.get(&label_set).unwrap(); + assert_eq!(sample.value(), &Counter::new(1)); + + // Set absolute value + collection.absolute(&label_set, 100, sample_update_time()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Counter::new(100)); + } + + #[test] + fn it_should_update_time_when_setting_absolute_value() { + let label_set = LabelSet::default(); + let initial_time = sample_update_time(); + let mut collection = SampleCollection::::default(); + + // Set absolute value with initial time + collection.absolute(&label_set, 50, initial_time); + + // Set absolute value with a new time + let new_time = initial_time.add(DurationSinceUnixEpoch::from_secs(1)); + collection.absolute(&label_set, 75, new_time); + + let sample = collection.get(&label_set).unwrap(); + assert_eq!(sample.recorded_at(), new_time); + assert_eq!(*sample.value(), Counter::new(75)); + } } #[cfg(test)] From 476ece46e7b3b67d01e8fc2031aa0e9faf3578af Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jun 2025 10:52:53 +0100 Subject: [PATCH 159/247] refactor: [#1446] WIP. 
Calculate global metrics from labeled metrics We need to add a new label to make it easier to fileter by the server IP family: IPV4 or IPv6. --- Cargo.lock | 1 + .../src/statistics/event/handler.rs | 14 ++- .../http-tracker-core/src/statistics/mod.rs | 2 +- packages/metrics/src/aggregate.rs | 2 +- packages/rest-tracker-api-core/Cargo.toml | 1 + .../src/statistics/services.rs | 117 +++++++++++++++++- .../event/handler/request_accepted.rs | 4 +- .../udp-tracker-server/src/statistics/mod.rs | 2 +- 8 files changed, 135 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 269f7a3a2..6f8215bbf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4668,6 +4668,7 @@ dependencies = [ "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", "torrust-udp-tracker-server", + "tracing", ] [[package]] diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index f5506f6e3..dcb814eef 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -32,7 +32,12 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: .increase_counter(&metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) .await { - Ok(()) => {} + Ok(()) => { + tracing::debug!( + "Successfully increased the counter for HTTP announce requests received: {}", + label_set + ); + } Err(err) => tracing::error!("Failed to increase the counter: {}", err), }; } @@ -57,7 +62,12 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: .increase_counter(&metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) .await { - Ok(()) => {} + Ok(()) => { + tracing::debug!( + "Successfully increased the counter for HTTP scrape requests received: {}", + label_set + ); + } Err(err) => tracing::error!("Failed to increase the counter: {}", err), }; } diff --git 
a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index 7181632aa..b8ca865fa 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -8,7 +8,7 @@ use torrust_tracker_metrics::metric::description::MetricDescription; use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; -const HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL: &str = "http_tracker_core_requests_received_total"; +pub const HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL: &str = "http_tracker_core_requests_received_total"; #[must_use] pub fn describe_metrics() -> Metrics { diff --git a/packages/metrics/src/aggregate.rs b/packages/metrics/src/aggregate.rs index e480be396..39b760fca 100644 --- a/packages/metrics/src/aggregate.rs +++ b/packages/metrics/src/aggregate.rs @@ -1,6 +1,6 @@ use derive_more::Display; -#[derive(Debug, Display, Clone, Copy, PartialEq)] +#[derive(Debug, Display, Clone, Copy, PartialEq, Default)] pub struct AggregateValue(f64); impl AggregateValue { diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index cc8eda903..d9e396960 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -23,6 +23,7 @@ torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } +tracing = "0" [dev-dependencies] torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 6474df0d7..3cfd6653e 100644 --- 
a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -1,11 +1,14 @@ use std::sync::Arc; +use bittorrent_http_tracker_core::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; +use torrust_tracker_metrics::metric_collection::aggregate::Sum; use torrust_tracker_metrics::metric_collection::MetricCollection; -use torrust_udp_tracker_server::statistics as udp_server_statistics; +use torrust_tracker_metrics::metric_name; +use torrust_udp_tracker_server::statistics::{self as udp_server_statistics, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL}; use super::metrics::TorrentsMetrics; use crate::statistics::metrics::ProtocolMetrics; @@ -32,9 +35,38 @@ pub async fn get_metrics( http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { + let protocol_metrics_from_global_metrics = get_protocol_metrics( + ban_service.clone(), + http_stats_repository.clone(), + udp_server_stats_repository.clone(), + ) + .await; + + let protocol_metrics_from_labeled_metrics = get_protocol_metrics_from_labeled_metrics( + ban_service.clone(), + http_stats_repository.clone(), + udp_server_stats_repository.clone(), + ) + .await; + + // todo: + // We keep both metrics until we deploy to production and we can + // ensure that the protocol metrics from labeled metrics are correct. + // After that we can remove the `get_protocol_metrics` function and + // use only the `get_protocol_metrics_from_labeled_metrics` function. + // And also remove the code in repositories to generate the global metrics. 
+ let protocol_metrics = if protocol_metrics_from_global_metrics == protocol_metrics_from_labeled_metrics { + protocol_metrics_from_labeled_metrics + } else { + // tracing::warn!("The protocol metrics from global metrics and labeled metrics are different"); + // tracing::warn!("Global metrics: {:?}", protocol_metrics_from_global_metrics); + // tracing::warn!("Labeled metrics: {:?}", protocol_metrics_from_labeled_metrics); + protocol_metrics_from_global_metrics + }; + TrackerMetrics { torrents_metrics: get_torrents_metrics(in_memory_torrent_repository, tracker_core_stats_repository).await, - protocol_metrics: get_protocol_metrics(ban_service, http_stats_repository, udp_server_stats_repository).await, + protocol_metrics, } } @@ -99,6 +131,87 @@ async fn get_protocol_metrics( } } +#[allow(deprecated)] +async fn get_protocol_metrics_from_labeled_metrics( + ban_service: Arc>, + http_stats_repository: Arc, + udp_server_stats_repository: Arc, +) -> ProtocolMetrics { + let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); + let http_stats = http_stats_repository.get_stats().await; + let udp_server_stats = udp_server_stats_repository.get_stats().await; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let tcp4_announces_handled = http_stats + .metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("request_kind", "announce")].into(), // todo: add label for `server_binding_ip_family` with value `inet` (inet/inet6) + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp4_announces_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("request_kind", "announce")].into(), // todo: add label for `server_binding_ip_family` with value `inet` (inet/inet6) + ) + .unwrap_or_default() + .value() as u64; + + /* + + todo: + + - Add a label for 
`server_binding_ip_family` with value `inet` (inet/inet6) + to all metrics containing an IP address. This will allow us to distinguish + between IPv4 and IPv6 metrics. + - Continue replacing the other metrics with the labeled metrics. + + */ + + // For backward compatibility we keep the `tcp4_connections_handled` and + // `tcp6_connections_handled` metrics. They don't make sense for the HTTP + // tracker, but we keep them for now. In new major versions we should remove + // them. + + ProtocolMetrics { + // TCPv4 + tcp4_connections_handled: tcp4_announces_handled + http_stats.tcp4_scrapes_handled, + tcp4_announces_handled, + tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled, + // TCPv6 + tcp6_connections_handled: http_stats.tcp6_announces_handled + http_stats.tcp6_scrapes_handled, + tcp6_announces_handled: http_stats.tcp6_announces_handled, + tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled, + // UDP + udp_requests_aborted: udp_server_stats.udp_requests_aborted, + udp_requests_banned: udp_server_stats.udp_requests_banned, + udp_banned_ips_total: udp_banned_ips_total as u64, + udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns, + udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns, + udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns, + // UDPv4 + udp4_requests: udp_server_stats.udp4_requests, + udp4_connections_handled: udp_server_stats.udp4_connections_handled, + udp4_announces_handled, + udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled, + udp4_responses: udp_server_stats.udp4_responses, + udp4_errors_handled: udp_server_stats.udp4_errors_handled, + // UDPv6 + udp6_requests: udp_server_stats.udp6_requests, + udp6_connections_handled: udp_server_stats.udp6_connections_handled, + udp6_announces_handled: udp_server_stats.udp6_announces_handled, + udp6_scrapes_handled: udp_server_stats.udp6_scrapes_handled, + udp6_responses: 
udp_server_stats.udp6_responses, + udp6_errors_handled: udp_server_stats.udp6_errors_handled, + } +} + #[derive(Debug, PartialEq)] pub struct TrackerLabeledMetrics { pub metrics: MetricCollection, diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs index b296f8ec9..37b668227 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs @@ -47,7 +47,9 @@ pub async fn handle_event( .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) .await { - Ok(()) => {} + Ok(()) => { + tracing::debug!("Successfully increased the counter for UDP requests accepted: {}", label_set); + } Err(err) => tracing::error!("Failed to increase the counter: {}", err), }; } diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index ebb3df0bf..3a25fd51d 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -13,7 +13,7 @@ const UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL: &str = "udp_tracker_server_reque pub(crate) const UDP_TRACKER_SERVER_IPS_BANNED_TOTAL: &str = "udp_tracker_server_ips_banned_total"; const UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL: &str = "udp_tracker_server_connection_id_errors_total"; const UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL: &str = "udp_tracker_server_requests_received_total"; -const UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL: &str = "udp_tracker_server_requests_accepted_total"; +pub const UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL: &str = "udp_tracker_server_requests_accepted_total"; const UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL: &str = "udp_tracker_server_responses_sent_total"; const UDP_TRACKER_SERVER_ERRORS_TOTAL: &str = "udp_tracker_server_errors_total"; const 
UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS: &str = "udp_tracker_server_performance_avg_processing_time_ns"; From 1376a7cb20166140c081c2bbf26443043bd1eb77 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jun 2025 11:02:35 +0100 Subject: [PATCH 160/247] refactor: [#1446] rename AddressType to IpType Address might be a socket address. --- packages/http-tracker-core/src/event.rs | 4 ++-- packages/primitives/src/service_binding.rs | 18 +++++++++--------- packages/udp-tracker-core/src/event.rs | 4 ++-- packages/udp-tracker-server/src/event.rs | 4 ++-- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/packages/http-tracker-core/src/event.rs b/packages/http-tracker-core/src/event.rs index cf969b4ff..e3d37569d 100644 --- a/packages/http-tracker-core/src/event.rs +++ b/packages/http-tracker-core/src/event.rs @@ -87,8 +87,8 @@ impl From for LabelSet { LabelValue::new(&connection_context.server.service_binding.bind_address().ip().to_string()), ), ( - label_name!("server_binding_address_type"), - LabelValue::new(&connection_context.server.service_binding.bind_address_type().to_string()), + label_name!("server_binding_address_ip_type"), + LabelValue::new(&connection_context.server.service_binding.bind_address_ip_type().to_string()), ), ( label_name!("server_binding_port"), diff --git a/packages/primitives/src/service_binding.rs b/packages/primitives/src/service_binding.rs index d5055130e..72d5e7f2e 100644 --- a/packages/primitives/src/service_binding.rs +++ b/packages/primitives/src/service_binding.rs @@ -26,7 +26,7 @@ impl fmt::Display for Protocol { } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] -pub enum AddressType { +pub enum IpType { /// Represents a plain IPv4 or IPv6 address. 
Plain, @@ -38,7 +38,7 @@ pub enum AddressType { V4MappedV6, } -impl fmt::Display for AddressType { +impl fmt::Display for IpType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let addr_type_str = match self { Self::Plain => "plain", @@ -120,12 +120,12 @@ impl ServiceBinding { } #[must_use] - pub fn bind_address_type(&self) -> AddressType { + pub fn bind_address_ip_type(&self) -> IpType { if self.is_v4_mapped_v6() { - return AddressType::V4MappedV6; + return IpType::V4MappedV6; } - AddressType::Plain + IpType::Plain } /// # Panics @@ -169,7 +169,7 @@ mod tests { use rstest::rstest; use url::Url; - use crate::service_binding::{AddressType, Error, Protocol, ServiceBinding}; + use crate::service_binding::{Error, IpType, Protocol, ServiceBinding}; #[rstest] #[case("wildcard_ip", Protocol::UDP, SocketAddr::from_str("0.0.0.0:6969").unwrap())] @@ -203,7 +203,7 @@ mod tests { fn should_return_the_bind_address_plain_type_for_ipv4_ips() { let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap()).unwrap(); - assert_eq!(service_binding.bind_address_type(), AddressType::Plain); + assert_eq!(service_binding.bind_address_ip_type(), IpType::Plain); } #[test] @@ -211,7 +211,7 @@ mod tests { let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("[0:0:0:0:0:0:0:1]:6969").unwrap()).unwrap(); - assert_eq!(service_binding.bind_address_type(), AddressType::Plain); + assert_eq!(service_binding.bind_address_ip_type(), IpType::Plain); } #[test] @@ -219,7 +219,7 @@ mod tests { let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("[::ffff:192.0.2.33]:6969").unwrap()).unwrap(); - assert_eq!(service_binding.bind_address_type(), AddressType::V4MappedV6); + assert_eq!(service_binding.bind_address_ip_type(), IpType::V4MappedV6); } #[test] diff --git a/packages/udp-tracker-core/src/event.rs b/packages/udp-tracker-core/src/event.rs index e9264653e..d354d3e7e 100644 --- 
a/packages/udp-tracker-core/src/event.rs +++ b/packages/udp-tracker-core/src/event.rs @@ -60,8 +60,8 @@ impl From for LabelSet { LabelValue::new(&connection_context.server_service_binding.bind_address().ip().to_string()), ), ( - label_name!("server_binding_address_type"), - LabelValue::new(&connection_context.server_service_binding.bind_address_type().to_string()), + label_name!("server_binding_address_ip_type"), + LabelValue::new(&connection_context.server_service_binding.bind_address_ip_type().to_string()), ), ( label_name!("server_binding_port"), diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs index 09fc139cb..c3e736a53 100644 --- a/packages/udp-tracker-server/src/event.rs +++ b/packages/udp-tracker-server/src/event.rs @@ -119,8 +119,8 @@ impl From for LabelSet { LabelValue::new(&connection_context.server_service_binding.bind_address().ip().to_string()), ), ( - label_name!("server_binding_address_type"), - LabelValue::new(&connection_context.server_service_binding.bind_address_type().to_string()), + label_name!("server_binding_address_ip_type"), + LabelValue::new(&connection_context.server_service_binding.bind_address_ip_type().to_string()), ), ( label_name!("server_binding_port"), From 96bae36c5b9bae301f9567bc339a43b7ee80219c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jun 2025 11:19:02 +0100 Subject: [PATCH 161/247] feat: [#1446] add new metric label server_binding_address_ip_type Example: ``` udp_tracker_core_requests_received_total{request_kind="connect",server_binding_address_ip_family="inet",server_binding_address_ip_type="plain",server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 1 ``` It's needed to easily filter metric samples to calculate aggregate values for a given IP family (IPv4 or IPv6). 
--- packages/http-tracker-core/src/event.rs | 4 ++ packages/primitives/src/service_binding.rs | 43 ++++++++++++++++++++-- packages/udp-tracker-core/src/event.rs | 4 ++ packages/udp-tracker-server/src/event.rs | 4 ++ 4 files changed, 52 insertions(+), 3 deletions(-) diff --git a/packages/http-tracker-core/src/event.rs b/packages/http-tracker-core/src/event.rs index e3d37569d..5af88c927 100644 --- a/packages/http-tracker-core/src/event.rs +++ b/packages/http-tracker-core/src/event.rs @@ -90,6 +90,10 @@ impl From for LabelSet { label_name!("server_binding_address_ip_type"), LabelValue::new(&connection_context.server.service_binding.bind_address_ip_type().to_string()), ), + ( + label_name!("server_binding_address_ip_family"), + LabelValue::new(&connection_context.server.service_binding.bind_address_ip_family().to_string()), + ), ( label_name!("server_binding_port"), LabelValue::new(&connection_context.server.service_binding.bind_address().port().to_string()), diff --git a/packages/primitives/src/service_binding.rs b/packages/primitives/src/service_binding.rs index 72d5e7f2e..74ff58e66 100644 --- a/packages/primitives/src/service_binding.rs +++ b/packages/primitives/src/service_binding.rs @@ -1,5 +1,5 @@ use std::fmt; -use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use serde::{Deserialize, Serialize}; use url::Url; @@ -40,11 +40,43 @@ pub enum IpType { impl fmt::Display for IpType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let addr_type_str = match self { + let ip_type_str = match self { Self::Plain => "plain", Self::V4MappedV6 => "v4_mapped_v6", }; - write!(f, "{addr_type_str}") + write!(f, "{ip_type_str}") + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] +pub enum IpFamily { + // IPv4 + Inet, + // IPv6 + Inet6, +} + +impl fmt::Display for IpFamily { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let ip_family_str = match self { + Self::Inet => "inet", + Self::Inet6 => "inet6", + }; + 
write!(f, "{ip_family_str}") + } +} + +impl From for IpFamily { + fn from(ip: IpAddr) -> Self { + if ip.is_ipv4() { + return IpFamily::Inet; + } + + if ip.is_ipv6() { + return IpFamily::Inet6; + } + + panic!("Unsupported IP address type: {ip}"); } } @@ -128,6 +160,11 @@ impl ServiceBinding { IpType::Plain } + #[must_use] + pub fn bind_address_ip_family(&self) -> IpFamily { + self.bind_address.ip().into() + } + /// # Panics /// /// It never panics because the URL is always valid. diff --git a/packages/udp-tracker-core/src/event.rs b/packages/udp-tracker-core/src/event.rs index d354d3e7e..761b809d8 100644 --- a/packages/udp-tracker-core/src/event.rs +++ b/packages/udp-tracker-core/src/event.rs @@ -63,6 +63,10 @@ impl From for LabelSet { label_name!("server_binding_address_ip_type"), LabelValue::new(&connection_context.server_service_binding.bind_address_ip_type().to_string()), ), + ( + label_name!("server_binding_address_ip_family"), + LabelValue::new(&connection_context.server_service_binding.bind_address_ip_family().to_string()), + ), ( label_name!("server_binding_port"), LabelValue::new(&connection_context.server_service_binding.bind_address().port().to_string()), diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs index c3e736a53..5588a2b33 100644 --- a/packages/udp-tracker-server/src/event.rs +++ b/packages/udp-tracker-server/src/event.rs @@ -122,6 +122,10 @@ impl From for LabelSet { label_name!("server_binding_address_ip_type"), LabelValue::new(&connection_context.server_service_binding.bind_address_ip_type().to_string()), ), + ( + label_name!("server_binding_address_ip_family"), + LabelValue::new(&connection_context.server_service_binding.bind_address_ip_family().to_string()), + ), ( label_name!("server_binding_port"), LabelValue::new(&connection_context.server_service_binding.bind_address().port().to_string()), From 3f5216e382e40f2e65e8ca5d2ce40eb7ba4753aa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 
Jun 2025 11:21:05 +0100 Subject: [PATCH 162/247] fix: [#1446] clippy error --- packages/rest-tracker-api-core/src/statistics/services.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 3cfd6653e..4ffecb690 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -164,14 +164,14 @@ async fn get_protocol_metrics_from_labeled_metrics( .value() as u64; /* - + todo: - Add a label for `server_binding_ip_family` with value `inet` (inet/inet6) - to all metrics containing an IP address. This will allow us to distinguish + to all metrics containing an IP address. This will allow us to distinguish between IPv4 and IPv6 metrics. - Continue replacing the other metrics with the labeled metrics. - + */ // For backward compatibility we keep the `tcp4_connections_handled` and From dcfb5d5d207b9fad0aceba9aa85c4497923cb33c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jun 2025 11:51:53 +0100 Subject: [PATCH 163/247] refactor: [#1446] Calculate global metrics from labeled metrics --- .../src/statistics/services.rs | 309 +++++++++++++++--- .../udp-tracker-server/src/statistics/mod.rs | 16 +- 2 files changed, 274 insertions(+), 51 deletions(-) diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 4ffecb690..66bacbb06 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -5,10 +5,16 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; +use torrust_tracker_metrics::label::LabelSet; use 
torrust_tracker_metrics::metric_collection::aggregate::Sum; use torrust_tracker_metrics::metric_collection::MetricCollection; use torrust_tracker_metrics::metric_name; -use torrust_udp_tracker_server::statistics::{self as udp_server_statistics, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL}; +use torrust_udp_tracker_server::statistics::{ + self as udp_server_statistics, UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, + UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, +}; use super::metrics::TorrentsMetrics; use crate::statistics::metrics::ProtocolMetrics; @@ -42,12 +48,8 @@ pub async fn get_metrics( ) .await; - let protocol_metrics_from_labeled_metrics = get_protocol_metrics_from_labeled_metrics( - ban_service.clone(), - http_stats_repository.clone(), - udp_server_stats_repository.clone(), - ) - .await; + let protocol_metrics_from_labeled_metrics = + get_protocol_metrics_from_labeled_metrics(http_stats_repository.clone(), udp_server_stats_repository.clone()).await; // todo: // We keep both metrics until we deploy to production and we can @@ -58,9 +60,9 @@ pub async fn get_metrics( let protocol_metrics = if protocol_metrics_from_global_metrics == protocol_metrics_from_labeled_metrics { protocol_metrics_from_labeled_metrics } else { - // tracing::warn!("The protocol metrics from global metrics and labeled metrics are different"); - // tracing::warn!("Global metrics: {:?}", protocol_metrics_from_global_metrics); - // tracing::warn!("Labeled metrics: {:?}", protocol_metrics_from_labeled_metrics); + tracing::warn!("The protocol metrics from global metrics and labeled metrics are different"); + tracing::warn!("Global metrics: {:?}", protocol_metrics_from_global_metrics); + tracing::warn!("Labeled metrics: {:?}", 
protocol_metrics_from_labeled_metrics); protocol_metrics_from_global_metrics }; @@ -132,22 +134,153 @@ async fn get_protocol_metrics( } #[allow(deprecated)] +#[allow(clippy::too_many_lines)] async fn get_protocol_metrics_from_labeled_metrics( - ban_service: Arc>, http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> ProtocolMetrics { - let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let http_stats = http_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; + /* + + todo: We have to delete the global metrics from Metric types: + + - bittorrent_http_tracker_core::statistics::metrics::Metrics + - bittorrent_udp_tracker_core::statistics::metrics::Metrics + - torrust_udp_tracker_server::statistics::metrics::Metrics + + Internally only the labeled metrics should be used. + + */ + + // TCPv4 + #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] let tcp4_announces_handled = http_stats .metric_collection .sum( &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), - &[("request_kind", "announce")].into(), // todo: add label for `server_binding_ip_family` with value `inet` (inet/inet6) + &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let tcp4_scrapes_handled = http_stats + .metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64; + + // TCPv6 + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let tcp6_announces_handled = http_stats + .metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", 
"announce")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let tcp6_scrapes_handled = http_stats + .metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64; + + // UDP + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_requests_aborted = udp_server_stats + .metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_requests_banned = udp_server_stats + .metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_banned_ips_total = udp_server_stats + .metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_avg_connect_processing_time_ns = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "connect")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_avg_announce_processing_time_ns = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let 
udp_avg_scrape_processing_time_ns = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64; + + // UDPv4 + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp4_requests = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp4_connections_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "connect")].into(), ) .unwrap_or_default() .value() as u64; @@ -158,21 +291,111 @@ async fn get_protocol_metrics_from_labeled_metrics( .metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), - &[("request_kind", "announce")].into(), // todo: add label for `server_binding_ip_family` with value `inet` (inet/inet6) + &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), ) .unwrap_or_default() .value() as u64; - /* + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp4_scrapes_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp4_responses = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() + .value() as u64; + + 
#[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp4_errors_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() + .value() as u64; - todo: + // UDPv6 - - Add a label for `server_binding_ip_family` with value `inet` (inet/inet6) - to all metrics containing an IP address. This will allow us to distinguish - between IPv4 and IPv6 metrics. - - Continue replacing the other metrics with the labeled metrics. + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp6_requests = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() + .value() as u64; - */ + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp6_connections_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp6_announces_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp6_scrapes_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + 
#[allow(clippy::cast_possible_truncation)] + let udp6_responses = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp6_errors_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() + .value() as u64; // For backward compatibility we keep the `tcp4_connections_handled` and // `tcp6_connections_handled` metrics. They don't make sense for the HTTP @@ -181,34 +404,34 @@ async fn get_protocol_metrics_from_labeled_metrics( ProtocolMetrics { // TCPv4 - tcp4_connections_handled: tcp4_announces_handled + http_stats.tcp4_scrapes_handled, + tcp4_connections_handled: tcp4_announces_handled + tcp4_scrapes_handled, tcp4_announces_handled, - tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled, + tcp4_scrapes_handled, // TCPv6 - tcp6_connections_handled: http_stats.tcp6_announces_handled + http_stats.tcp6_scrapes_handled, - tcp6_announces_handled: http_stats.tcp6_announces_handled, - tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled, + tcp6_connections_handled: tcp6_announces_handled + tcp6_scrapes_handled, + tcp6_announces_handled, + tcp6_scrapes_handled, // UDP - udp_requests_aborted: udp_server_stats.udp_requests_aborted, - udp_requests_banned: udp_server_stats.udp_requests_banned, - udp_banned_ips_total: udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns, - udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns, - udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns, + udp_requests_aborted, + udp_requests_banned, + udp_banned_ips_total, + 
udp_avg_connect_processing_time_ns, + udp_avg_announce_processing_time_ns, + udp_avg_scrape_processing_time_ns, // UDPv4 - udp4_requests: udp_server_stats.udp4_requests, - udp4_connections_handled: udp_server_stats.udp4_connections_handled, + udp4_requests, + udp4_connections_handled, udp4_announces_handled, - udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled, - udp4_responses: udp_server_stats.udp4_responses, - udp4_errors_handled: udp_server_stats.udp4_errors_handled, + udp4_scrapes_handled, + udp4_responses, + udp4_errors_handled, // UDPv6 - udp6_requests: udp_server_stats.udp6_requests, - udp6_connections_handled: udp_server_stats.udp6_connections_handled, - udp6_announces_handled: udp_server_stats.udp6_announces_handled, - udp6_scrapes_handled: udp_server_stats.udp6_scrapes_handled, - udp6_responses: udp_server_stats.udp6_responses, - udp6_errors_handled: udp_server_stats.udp6_errors_handled, + udp6_requests, + udp6_connections_handled, + udp6_announces_handled, + udp6_scrapes_handled, + udp6_responses, + udp6_errors_handled, } } diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 3a25fd51d..b42a73f27 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -8,15 +8,15 @@ use torrust_tracker_metrics::metric::description::MetricDescription; use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; -const UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL: &str = "udp_tracker_server_requests_aborted_total"; -const UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL: &str = "udp_tracker_server_requests_banned_total"; -pub(crate) const UDP_TRACKER_SERVER_IPS_BANNED_TOTAL: &str = "udp_tracker_server_ips_banned_total"; -const UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL: &str = "udp_tracker_server_connection_id_errors_total"; -const UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL: &str = 
"udp_tracker_server_requests_received_total"; +pub const UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL: &str = "udp_tracker_server_requests_aborted_total"; +pub const UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL: &str = "udp_tracker_server_requests_banned_total"; +pub const UDP_TRACKER_SERVER_IPS_BANNED_TOTAL: &str = "udp_tracker_server_ips_banned_total"; +pub const UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL: &str = "udp_tracker_server_connection_id_errors_total"; +pub const UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL: &str = "udp_tracker_server_requests_received_total"; pub const UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL: &str = "udp_tracker_server_requests_accepted_total"; -const UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL: &str = "udp_tracker_server_responses_sent_total"; -const UDP_TRACKER_SERVER_ERRORS_TOTAL: &str = "udp_tracker_server_errors_total"; -const UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS: &str = "udp_tracker_server_performance_avg_processing_time_ns"; +pub const UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL: &str = "udp_tracker_server_responses_sent_total"; +pub const UDP_TRACKER_SERVER_ERRORS_TOTAL: &str = "udp_tracker_server_errors_total"; +pub const UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS: &str = "udp_tracker_server_performance_avg_processing_time_ns"; #[must_use] pub fn describe_metrics() -> Metrics { From 15b802526a8741b021db5968af1b3502ad5a5986 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 11:31:27 +0100 Subject: [PATCH 164/247] docs: [#1579] add tracker demo section to README --- README.md | 22 +++++++++++++++++- .../torrust-tracker-grafana-dashboard.png | Bin 0 -> 259670 bytes 2 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 docs/media/demo/torrust-tracker-grafana-dashboard.png diff --git a/README.md b/README.md index 33fc4a028..bb102355b 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,24 @@ - [x] Support [newTrackon][newtrackon] checks. - [x] Persistent `SQLite3` or `MySQL` Databases. 
+## Tracker Demo + +Experience the **Torrust Tracker** in action with our comprehensive demo environment! The [Torrust Demo][torrust-demo] repository provides a complete setup showcasing the tracker's capabilities in a real-world scenario. + +The demo takes full advantage of the tracker's powerful metrics system and seamless integration with [Prometheus][prometheus]. This allows you to monitor tracker performance, peer statistics, and system health in real-time. You can build sophisticated Grafana dashboards to visualize all aspects of your tracker's operation. + +![Sample Grafana Dashboard](./docs/media/demo/torrust-tracker-grafana-dashboard.png) + +**Demo Features:** + +- Complete Docker Compose setup. +- Pre-configured Prometheus metrics collection. +- Sample Grafana dashboards for monitoring. +- Real-time tracker statistics and performance metrics. +- Easy deployment for testing and evaluation. + +Visit the [Torrust Demo repository][torrust-demo] to get started with your own tracker instance and explore the monitoring capabilities. + ## Roadmap Core: @@ -49,7 +67,7 @@ Utils: Others: -- [ ] Support for Windows. +- [ ] Intensive testing for Windows. - [ ] Docker images for other architectures. 
@@ -274,3 +292,5 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [Naim A.]: https://github.com/naim94a/udpt [greatest-ape]: https://github.com/greatest-ape/aquatic [Power2All]: https://github.com/power2all +[torrust-demo]: https://github.com/torrust/torrust-demo +[prometheus]: https://prometheus.io/ diff --git a/docs/media/demo/torrust-tracker-grafana-dashboard.png b/docs/media/demo/torrust-tracker-grafana-dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..090932a8c47b5fba171bab7887f7368e70b94e7c GIT binary patch literal 259670 zcmeFYcTiK|*De|xic(ahC{3jbA~hf-ph)imq4(ZVdPhY-y3#vH3rQeI2_-Z&0@8bc zP$JR^ozUCee!qLpcjlhI?#!M0*V!`+WM_xH-}SEYtmj#Y($Y|*y!qfJ2n3>3R+7^O zfo^nzKvz3&kO60~tykQDUza?!6=gxFKBg7m#Z_zNS6Y`SL6@=}C51qshahFSm%4u0 zYt#M~x_b>5+x<2|Nosag>Z!`IvZ6ofUKl>5i?({r!&hzvb)&_M%Om+}Rp{ZhH9Wd8 zV=B%_S9#+!x=QVPDlrQx#XCtVwy$xwAF_XrV`tBB5zd%DP|TNIR1=y|lAWmt-Pp^{ z@N5ybhG@1BmQJKf3}8qo@I?Rf_XZI3q|JM@&HlLsVuUEW#6lQb( zf4zX>K0C1f|2`~!KQ#PjivM}MQgFzp@risTC@6SSFpGcQ4|sBVH6DB34k<9JO|Eid^0GEj*40l`Ti*4 z5(spKT#y0;Vu<}@2?Bk$xaUH_<~ zfd4;M@yF^_74X0Q`-q_U|J)8_oxt}0_rV|}8@RypT0GMqTb?Oqji|DQ@IadMH(SsO zNU?R@6qx?TfI5|Wy=!r*sVUq%)Hh1>o@wb1gA|`>AxPzSIHinXk^PLXdH%^|&^rqo zLcO+(aXPQ_yWQff)a4DXx)?6NWDIY5sBF#OCN1_i7|{sj?A+TNjjzKw>>aQ3xUOMO z6x2rx6w@4y25(NEEi0t#6N9LM&*)mgoCevtMXFtjH_kgHX!jMFzDOymvKp7wh|v=_ z2pcwi*FhjsQ&lYZZ3G>pMmuJy_j7j^jCpe-MbfQcF~mDuZ%SMh^#ZANMR$$Fag4=) zjijb02(N&B2`=&=*p-{sX%NRMhD&^J&5L`woE>``wT8rrp260~-z7>ZQn%I^_6KLR%5eOv zP8nu?-q0FtiNouD_jv}yCK#zhBki5FXby&2YcqS$uZ`69vestAHU~xJ9&X4nPt=?4 zciTUiY&7iN2vo`S=S8nM)!|wf25t4#lWoUt1}_c9A-XdQkHpMij&q*(CO^ZNjmI4J zkXe&|9}?g6hAuny9yFJwxGSz^HdKpnc9ox97!j|m7*?MLV<$X$A!a5lAF)tGBiX33m61*JwT}5N$Gt zd*&Ur9N%RguU5St%{ZSlUr#~?yy?7bY9)jk<(Ma1pyA%Q+A$ljLkDA4%2;~7A>q8CMX;^xiaToX*mP7S-K%-g zT+(PXu-2h}7AvvHTmrYA^IA?(B+X-7CEj!u#u8aa!s^tt=5kw?(Y=B5%FVK3!TU5G zDXzou)^2V6CS{SExkwknC0CP1GyktN-s>q19v}6H-&5o8hV3Z8Ap^puH?-8fy(25g 
zIU~f=yvId)Grn*fZ0sE`{a6*=j)hL_raDg0mps9E#h6Wgd*uz+ci0CDgPT0EYMD`~ zVDE@UlP}0MOT7iamSz)&{OkN4ya7L%dLQH!QcS*e4PK`l`$zW9E?PJ zW082hyKv=ddWV@}yY-0{Raj;rcw;UPSM)_(*x}FjW_>Ocr z$v_qlSI`XYsR;O3kG8_J&)It3b?9(N*U!w6ANXFq1?UkTmo5=){@2c7Fo9(tyNr%` zoKz)BZvbA5Hp~P+lVv@1ZdB=S=bR`^8TzK0H6zWVgQMt87nN->&8mSt{uDSK_cu$0 zvhn>9kYmXbEvy*eFxT{-%vIfmLl}I#xxJ6m&mKdS-Hp**8Pg%XyW<$cBf@bwU8qfb z35WY=gz&cP!U?}qn@$##1Eyk~hx^-kEnp8$i`V+|Wlkx(B(_y!BBQfvgGbtPTgBBF zt;vJM3g5*b7VTL)bom++f^iUjSFbA@IE@t+h&z4ArDDX4A(V(HsvTqNe-m#^_M<+$pop*BH$}pM_3WA15{4q7j39 zW_Q$ZNtoM8c`yDpZ%Sh6EztM2PJOs*{}hZC>g#Ym1Gu`ThEt%VKnJMEVG9h?* zR}$Rs0iY3-gPOi~&sjEdyIW&DPef72#BJSgN1AWa%Y?-cy}sp|nP!ylspBto-5eK9 z>zqvBZhos9@b`MPhy<514!fbTQE6)2XEN7x*uFL*vS<&)TXI0^)8S>koPM50h_?3i zqzAK|U40&1I*0V@+)^}5ekwH=)6E{5{iEr+k)ro%nD=qIUBo2zgeh;9&N)5PSCfHK zEk^$AhW+br04ttdvrn>;LP%iri*w-~4S#J$k2-}a;o>UQlJoUM`R0JJqgQ2WUR`ec ztQfG*M+Gk91r>9|l090|F!^aDL_>L-%Kv6v=;1_=+XCy8)FltaKbu#9g&W|;?>tyf zRJo`$RdniODw{wupQwl&>@eAO7)>VH2C+b)y1(65vBvPkOku@dz2b$UZcELfG*&_> zf8K#?UE*KqSb8bwH6E!z8O>rIMh@ik6XeU-$L7WRr)Re7>0Xg^(#HjMEv-5y2kRrL zVq%ET>Z%}+Nz-b)bN8-l%>qZH2^*(V7fC$xB#KzGY=)dVs2HFM5q!qU z7r0Wg+aJg?A6?bOB4$##sIu;NfUPCls0!|?&^E486WH#}u*tyaa3CbP40wXw<3!iG zM^7blM>p*@M{$>BmrC?zi@X+I>K|BZ5GM5`q&m5> zxB{(5ed*B@52PN-NaZ#bUr-$I7X{B0?SeDQuX?dqN3+wq<0|q9K{F=tO>VTgB&7_e z82i2$M~X#riD>tym)cFyI{g>Rs(1*|uD9h>TRq(~%dW54Y%;e=zgv`fvz;D*-p{C* z%cqO#&R*po7LO0>t?0A(2b;ndt+w#-Rz>3=Hv9jw+LI#1P1HEtbyL6V?ZjoiY{65k zGEMcX!lfw`KjBc>b`r89LdScdR9v>A1LiyHL6#|F=BrG^AKa>GExrCj)(`XS{(_XJ zdt?=tIy7^%d`SDkbOy?GOLz6{@B16L+=E)@j>bv!+&5!Aq#_hHrC|9*3L2SW&LLh< z#xw1^2x;7%uQY=CBIGvOZ>D5>)Sb9!*d}OjS^nG7E*)=!>na>$#X2dp-isD4sFFC% z#3h^D9VA9z`|oaPO}+t(XEsxuziZ{M#!2sdHIEwI(wdSJ4u57i4cN7;#(M82pXn*b z^x23{!+Jxe5N=}K`Cl{8{dU3%QteiqYmknlb{;ldh?NoM|9i`dr4VHhE}x>Etiyx% z=M2h89 zT-3KQ$Z%NxUr6xzUKwRlh4y5wIb3~3RU~k*I@1HIu`X`#SaV0Wmx-vmV9ZN8l}t(0 z#8=PSuAl#AHag6)jh?PC^)~bramVO{6ZMw!+E|`cHimt3BE_kCDW2-3;9;veo}RXR z(`D+7D(pD~vQC{XepgAqYPvqtg2;7#XKY}KM=wTww`0udY^Nn^X7VV45C|cHcc~OFP9Qjd|h$z 
znQfUzy|z^)6~nbjRO*c@qUzNyqtPd6j;ivv)3!0tr7u>By#p!3#gM=l@M4TA+9AINmSW#}UJndztyU_QY8~6#yol9{lLVCyJ@=x|@_l>eM-1JV_{f%_d?S5ML%w ztXZ%q)?Hcwg6&{(5-JfK-*O&Stb+e$c=U({e32?1J$;beQXLd`bGO!M@(WQU6lcYU zjs+SpOUyjNaOs!K%U@y>t`{>N7qYvt^g&qFH-`z zyK35r%btXJIhBiHA>YfgX|D&ZP1OqSJ-Ma+=B9|YCyZzrn}Wr@3?BV91y|$N%H1DF zAL=7#BUOg}yJ)d>>eNDcH_eM6Vp9lz;Swi&nMumx`bfzV&T%hd{elm=)Ms2sn;>3u z*mo_n@l64H-HzC^;pnv5PzDBXMeE#|aW8|yipE#f>u$6yjSBo_AulJpV$B zen>m>+7~gXDwViv_?^jxfZ6HV{I=jf`~iFG@Z)0u5o|BV83hn-uKPK{Q}y@>1P7S#M$ReUXFTONIX1kQsP84zNd#qlq$}^2x3T=@!j)vSsz-&e} z%1v8M3+c^Z6$D8=`D|3zd(5_Px~d0)ToIR-Rx_jScbZQz|^;rF@DK=8f>xry-70@FiAb&h9#T z=Zz*w5%olvMRmy8ypF42qNR&Ri^itqFP#Fu|H=W}8CE$cy+Rttl@%Ud-sXY1mCKQw zQ?=UNDAY(rYoi=vG=}gKpg}08qN1MB(%thq%zV%Kk4yU+!2qxs6bioplEgybCTGN# zqR+Pk09xh04~rN7J7pJ2oqsZguU-}HH610){r9#1n6{U;|1TN9|Bd-lN(IW%|2)2b ze)<1D`SyPv0f1RT|Hlgd#|r<6!~fkE?%@A#ZGaRPb?3E8r4cQo6Es#_w*{vM1qVVW zrpm6wc}r|KE^j`%4EkfA2vO-w6%g;GlaDl-cio0}jg=wnDx@8}kmJK6OV;)5kG0{r zH^BRCA0rSg#qzYhW+`G~E9>~-;E+)GMc&+%S>NSbUlz60qd#eXXe)SLEuZ@N_P;v~ zu8sa^mtb~%EPWYtl}os%BwL`->J?u!B_*`9vuL=*4!B5n-f?K-7=O<4_nPG`L1z!S z1V+0imJUKo8ZlPF35s?gh_(C!$ zS(@&?vtLMi3&5%GlU5=23KDx+JtkOBMvc6ox`fTmZAcGi5-Qrm$*fG(P%kU{E1i7i zs|7blJ*a1OeBc9;5j5l6C|IQhhhrgBGz-r91mjk;Ge=AAIZuX7u)`4zh6}wc%{uz! zGJ4q~oo2ammFa32j%S8*Y>&Y`cS|gBZ`usYJjCj*v0ck0YYxGvg(6YQqy3ORI=JC$ z4ckaX?%oHe9)T<)qv9RAgm^G``7FZdOvV>|KHU9Y%kPt6UX=0xT5wyX@*^*f38Ca! zx@sJR*zx!7A`Z$pBS{{%Q9R@yK%m5ep09YRfaro=I_z!&9qZ_3R}9(u^Mc;50b&P* z5at>Gb5s5DYe#_rdznke*85l0YZV!DC$){?70lC$bUtj(k8QO~GjbimOPM<50cfnbeugv=xlmQOAB<{kZm2I#+o4 z2nHeKih?f>LCF4|zH^k2d!y?$fXpkYZv5Rn}|@zKVrs$dW9K zH1cMw-PAs`#!V6|;iRFG;dZ)pikuL!fZ2E(XxeHx@OAj#? 
z^52r+{#=AC$UY`6KRMQ=pBXj-Hptxqc^M9Sr(o^9MMr=PH9FQj_K zqT0wbiu5i@NI@Zda*r5WiJ459SqRJp;}!=Zz9e}Y(_h}Youupun7QtClDDf@P~9da zh+zuYd+*>fWNYLgY)*;l+nnFSXrai}mgFAk#&g}kni&R&yl3OL?>?vm39A}udyhQM zT>z}kD?$zf&#dDO&b&u#Ozjd@2N_xb52UZ+x|+i6`K-FL$Q?7Z7r93M@}k$x=Al%_ z$*%942qV%-*)~Q@yn2krHo_3Qcl=xVC0AEAFKg4Qy6>+p}&a z)YYpgM94+1j#J<<;15BE4#CnWA|a0NSJ_fEr$brq;KG55YmykUn*X&4J!eQep2dS4X(Q3mvuwX%8)PuYjRUF{P0U{CbeTOp%>$0Bd^uPx<`nS(dnM)MgS1Z`Kh=uX# zEYk!fz}xz5<8%V9^PweZ>~0RutrVU#om~N0{E5CHNIWo{lXMwmC}SB|8a11#KYU{o<+lFgCrg#Bd)`${LNlG$!uPbAQLXtI=D)!_R% z4hXi~UM5)hQQ-W#Y~>&4g1@B0t-(vIil0 zS~CU;DX^eh&sTnEOCb1|p!UdAv znpwtG7;cV^E&J(&{gmxdsg5AB?K@xz!7qcNeMZMt(ZjYmp$2GZ_0cPwAGWhfPkC~B zl*idkJ7cU603T6wNKQq$PB-a`Jx*mA<=)rw)%xzFZ_M{V7F&l#vQ7zf*T}k`o1EQS z@*cgK^(xPMzZdl^a-wYM@-GVe<~fv}f3@Pm3@3--Pm%UTExUI%CdU8P5I<(WBhjn0QsW8kO&mqS4F-J!^+y4mp4o0vO*)F00imV2 z*Kd!vzumdipC%eu)7YXQuO1hD3+*6RZ`yE4bW6?m*XryDQ*i5+*1!SlKc_fmmR08r z78BR`7XjU98)=f7T}PCV$A`V+m91o?%EO3mILws zV0WG-Ly3J?%m+-J@JInr6!%ZejE#)bVcG*wU(= zMsF|%9O|1lQ^yM+XCPKa?PFflaZ!o6oJ@=l6%h2ZM|Kyuba89jlABc@SF%vE5t4WS z2x1_QpfchatXnPJ8Z58X_;tGgq$9=of(X9(-AFhXHnaFEXX$L*D8FXMjDS>aw`V|^9I>JDp8=UyvQ_*AMe?%V`6HJS)^Nja68iB4%1ukm2>B*0i%$%fVR${#{1)kE-x=?}Q)dW~b5M z4(j`QeB!u~e0RDvAk^-Xs%u_{7`6gf%30ku$_WqYiJNj9NT`OR zj|2CfG$(pSnNNmQ0FJ#7ZG~0lg7X<4@=JyT>n=iTRo`mY3d{Wo{yrC4x^88Hx=B?dKFkU=|E@d6w z>y=A^8Jq%@NDZW5qudAeJoq-z_^V-^9}+L=Q7&|E!q~R-!IbAwjnmr;0O$6vbQEyr)u5M9|7-Jft<;YaC@CLdmBWHlU(!qbVlIBF zLaX)7rH)(n#TfX_f(EpZ{R^8oAhS{$?><%T9D-5#v>cA+^>0@n6MYX$oc+GbWsIh@ z1uXx%6O6j<`W0yQjsxDc{RF3Q9KU>*GXhk^@~3EzMd(DDjTc4w))G^(Ej)IJT3Y^W3ko#Y8A+2v7Mfz7pwU0^e zzx+<9fDQqT6aEdx1XJZB6~nC!k)iES5oSDsjeHX>H6mTfl;xht0 zVZLy2S!(>)GOKcs)g(S{Krirm(FEw!vD0d^j~*R+y?dyb=;np70pG|D(YiOaban<$ z3TUYMwRzZ4m>tnN>t7M&^`O=Rb8z@xgA3EwvQg7d^bN90eoH0=`tw&%lE-YAf+v;_ zVrha$R}>W70#g;{fZJyJ=sTX>(=UY*)OPYdA9~t4e!o_asuz{$a~G>)hiC^CVXxJC zx|Wv@vGQmfGgAZLQ>5`N7&~Jnm_b_byO^I}#OXl{eVQ}@2)``zh%?D8>L4%D!O3+n z_+_mC-q5_zJncSzS5GvF0GZ6~zFShkekcL;fq7&KIoh?F6Ua{-iyImjimqyPfhIz^At;EyqSLaNtlE9w}z6(7Ph&Y 
zoc0fD&&WU~K||%DSj5ANQq=N4(-rsTzUv=N@ZC0?_|lRancwlQ8o}XgN-*!gu6#;=mDKWSr*+Gp!OjtB?zPKLcdEZ;$a}jhw zo|&^Nn;128*%h^Io)=qZ;PvV@(epnwnj~H7OB{|oFT4=;V<6R%i$)T##g)bKGE@a5 zR{>Ov`j%vEYo&(f^&_pIjS=!@0!lrR-IVZh=(k~v$w~-06Og%VO@aKvT5kZPAJR@?k@ehz=0oBhQ1UznOC3N-Y(^cc#yVw1!ThX6KD_~f+sf*H;#?Y ziV<@~_#YDSa$!5I9AtW5Mio!SC!;GO?=~tgQ4Yp>DQJJ$>x|D_@R#8xfz`_#QIKFe|`-ZdU<-wRa*&eJ- z#C@5uG4HCV#^$jrzxdvx2*w!6i>*PWi>vH#*K$4sfKkNuZd87y!_t}B7aL*s1j9aN zTB}nY!+*3N#!s0i!;a}L;>OLDwl9M>^U0!`hWdGC3lLhz61TI{+>!)56n1^ZLS?o#KhTcA3!L^AAAI>DY!J z%TK&TlZK|IQ@*rAy~p%Y5JMLFzKXd|B)_hZxzYe++lsf5;HB^K{8GQ>56V)DLPw9e zbbJuEnpB->zeTQf=@-GC~FL?@S1PhrhO?%Ey9k zTLjzN7<{Z1W32F2_dcvUNiL%2jc)XEhx1u3?G4*z5>SD^(%&=+VO=Ef_nzBbd*;L5 z{ii^8X?KO#v2Xg>Nyt8!BP5+5_nlm`F!vcH>Z++dN>E7;4seKj&+N$D%cP50Pl7^X zRP`0auW)w9qQy^DdZD6$dk2}Q1L3K~4I}El?)CGGMt-OdgyH#$AG3;m6 zj4!uD4;)){kHLm@L87`BPYAx$*ewf2}D*0u)WyYX#V6p-4N^M zH8N$6uwW)Q-Q2bOzn8SWuuJ_ayFTo38o%lMhL|v&K5}DzHmONN zFF3RI<%`u6oEk;PBZNS22309O7g^RYjAnc&QD|k4JJc{@ZG39kMZR0{<|O{4rb}|X zhvv64tee94dQVh5@!Q=u!0w%#_;PQHt{8VvUp4LIGN>+yOR_a9UpwwQ4sAf{j;`%Z z?gh&i>DVQ~`;D8gfR4xTh^lA$LATg{J(MiQ1zszDzXGDK$Sa9UY?!{R995^Bkak@6qRF8Biw!tegnJcBo@x=?4W4q> zPpd#f4`}ZnJlS-gbMHWEm%fBLY z9v7Sh^-?nYR!aRLzaq;CWZ$9WJ2!>HvwhbtoOnnTZW=WV_w8@1t)n()SQmiUKTWi4 zA3ybl=72{x%s1B8eQ2#rzUuKQF_f(AGXNYB(C#Cqh=X@DoA*UW9D|`5R6dm=)6b2I zTplgiOK-ZBLN*V;%ExkEO^a=q!XuB7Pvrd7y(K(KOoxw$VVLCQp`lfo&iNZF zGojB4FWb1;M=D-bOZBfnDTL=L6z9;qFtiymjdDDecU-zOC88KhnE$L_-i*t;2NL); z^&rjVkjXq`$D!^nL`qJ|Oe166$?GxCOusO*eC_WzIS4W{H7qzYbwT=lopSni zOI=9g&pM@U$|Vpfb$>mTW95xbNB`vD?D;NlVod`ku+b|9qAme+DzV>od6BUXPayB4j5E(!qAj~)GT#T_yD~4dq?gnZ+wzOL z2HSHvKAHUXPoLgHBeuaO-VYB=Ogsk4$<-6-ew}pi0a$l^!%o}To~-K>`{~VGs`1I? 
zvOCH4_3N%s+_Eq}WSF)rRDz$E(ypL=JU|qZ`)fOe=3Of(9yI>~hTD^v)U(C;eY; zL8ymE@C9i+(cX%Gk3#!3D;0?fFx85yuQTRV$*~l9qy75#uA6KwN4*!hc%$ysM5ruO zr<7Q~WfE2K_(e*KjdOZ5#IypD?R5K!>d= z(@m=EvNE++0bfN-icz1$y+6SdmwX^?5ywlTDPn=ickG6p#gn{}ggg1gu}Y}kuUx|I ztAU*v5gZ>TW5kv_4MIxLKmX|rCIFu(m$Cm?wg)q~GK6`ZbXg-in<)2gpK=|!eRb~G z;9^=a@g8G7VcrnH-6Q>~r>YzwGUBRP3YvrK`$kB= z(=YU#RjWQaA|OV$=5$Cdi+-*T!17{}Ndc~EyE&bi7FybV6P<+~C(m3KuogDq6C^GC z4Xh_YJfu{U6G?YG;SQ!(36_;8r6=%_}C*yXt>3rpqCB>(O21fGU#T_rpgFXM)tb zr|_hRj!o50oP7?8iqQB_TAtuxd&qoHQ@ebm!xdod7jeLJW0$(R@Gs?9@eFFcURXX~ zP(#Q;{C+GiQ+v5mdCBK|9PwQYZHnzXmcv?JQjWoZCo)=Oat$)A*Q;|+p5F<}YpSu^R z)1P`Y#EQ2JXiJZ9qdlWOIeErkw-o>w9Ka2Hjeso&1KWBBNR}8#F1KeX9T)6`nP6&; zY$aLL8MrgLp`XlH4K8IX0|~@W$a#A^3_o$5<1-&;Qp*DieSLv)&P$-@L_7P$*#azY zO7SJ%+1$|(7DXYHz$EQ^#1K0p`@z0ksB}jhX&x96P$t{{%1-OwcyS_HR}0Y8^J6nl zp41R`EkW;M%&Wj^+jhtjiGn?Ez!l^&WOTDfDgv}7idq*fyRsn1X0tCLiK44bb6)`3 zO_Lek-;N<2uXqG&ZR&;SG%I|Z5c$DUOthxCf=UXi*!qk($S;-caJX}(; zg_m_b8bl?`{z*}&ur@Z>OZluy(Mai0luZuMMP2ylBYzLGyF14WVO=miEDLG#Z0KuT z0DoLgq-q|U-K^R7%4kVSemhesDB8H8q&8d;rWB!4Yp_>6%XTG~+UL&Z0Ze0gwCnCP zLt2r0{SalCGn+HAi$tuNldv1`xCIh849`?MhQIV~q{&q{TfKZfNXVO05-?Ya9E@p#3uliftHK0~p(D{BRpm8eXP~4M!ca!hO^5#)K8fu$Z zv;b#2Lwr#K6yRuxL@Dvm$QWI}Wl@(X{XtM`k@egch;zLhZDXsq_H%w*K>h`E(5@TR z-j3@_&g704^FK_ZwHt;g-#0P~`nA=PNJ{I{o}N=VaOOw< zPC?!9EDbU9P-_8k#3=$2tQzilo7d;tG%n&cjT2?=>?O_tHfjS=LWv^SJG@Ews-i&c zzB?cRf$1;LO;+?SWNJ5M22Ye5uXr;lH2#dc7nr5HxX0c305==>Ptc7a31T=xHPbqT zCRoaels>+#S|Qp;aeJmMGvRuF90xYLG?x3J9KpCM%lx&~%ODCjpK3!*`}r;_EuM3D z)m+XqiRWtVq520RZjMfZAtCt7?CzuzU;x9t0@nJW;~1R30Gqz_%yR~P8HS72V0FnEe>3^U?7_0u)^zA9>e zO@;e<#N~p3e8xq|q_eJee`Di4EKTq*^^ikfF^u_YFqx~r8ks)0s|7NKIyRb4t`oLM z9Eis_1wZ>Yjj~IaxOc}2$H$x;B8+(InF_Q41VGnFxW@{9I;=nWcbKO5nvw1|!kemp z;{x(tpM$h#)&>zfk6Y*Ll9cPXikx;!yGh}P_u{DPc!BN>m?I1eS3U>ilO2|nuQ7FjG$q!CFhb8^#9_8q3z zcif9d?s&d4H3e!h#LQ@**m*Clpo9~RKx_Ic5gyn#A&Gm#3;*Eo)ix`k72o>}5#>Jr z$egs7PeaBpKfUS~P+vbX?$o3;nA9co19Q87+jFlSh}Fx%*VQ3vkA;ka)*Lj*LGL^P zno5`GEIM^>n)VDr?TG@o7(fz1vZG+j`3X 
z_#w=~peJoR^wU4L?E0jd;<8@PHxJO+kQBk%;S-b6kIohk3#upv@+LiJ@T3yq^{vyy1Vc(Cyyk^ zFZx=BU)=69hAsU1_%??!uj;K2Jc9=S2XCaE3{xELbC0?QXw&vqcS*3W#CMy)Wi>BY zy?;5Ge8g^=0Mj%wN}U^|^Hv@RYFsOZ0G1$P#oHGLVhm|0t}N@ARm{L8%U=OQaw7&6 zmZ<`9wG&_enxf5n`K!|1k^j_zr^uFtw24L_Vy@G{|~h4K@MB zSde8Cg+djIwlkb(HHW2SA;Dz%n?Uhesf(!t)DhJfDr)%Ksld~Gpb~W5xTO!s;=sU7 zbhG;L>({yaB8OZE71z$1{$%=+_~$4CT&|L|C(G?ftgXX$7l9?mlFR;9d2(p9bby^ zXkA*q56lq2ggn3#`8A=A)k_!E2Mh1xOc$DE_!DhaYoEq-?8G+p(DmD7DF3!XzhL>8 zIxt4pRk~1l?Q)ZB{>z#d6J|8rPo$W*Rg+%eW#9kQ5-R3@{|)?J!>CsDJtw%S z#y00>%u~adv-1I_{=L!WAqc^@m^Yz+Gw(oRe%XYuki8j1*gQGg+82(b+`So)e&hSM zM)J7K1W;zv5k-Bdy+Rnd5jb-di>t%0 zg*fRZ9)_Q{g5E=|bB)s5b~BA`hIFfV)_wS(^N9@K{h;niRqTfk>>E=}<=?67*{coo zWPX2CPAvRdlFDALZ>(d@og$gWn{8kFsbZHaLL-$u_{P(x3vb4bk`9meA;|M)?C3kj ztSg66mv;SU<~OA?jJ^}v`wcVcK`jt)wEi{8H(|csx3?mH#246P6ff8|9g$v6)h*nV zdm7;CwEes1`MI4JC~xzL&4vbjgF7>dEXZ{ddQC1PyqKv)MD!DXT#ne&mQj$OZ9enn zL=F18-CvrVW`=H?E-RXne65=wC}@1d9* zr4*0jxE8KT-8zQ3(=YbWFUXSgRH1ss+nbq?Q^K0K2r!wO!;xEOGsJJB-zASne^5<9 zOpNaOCN*9Py}SDC@W7nIZVQ{SbLiG>@%BmN1+MhOpSb$1{Og^K2fK-XRExEHS{VcIj277rw;U0hQ@D017s-vV?WaCHLn?6f9}n&u@}^ z_x}EX4+@z;k8V@eRNTJD{V6=4D7yNdh<~$z(r@|;S{%#9#%kN#?lAjby}Sxq%PY23 z=ve!^p1fUk8~6vLdd5L*d$WM)mF>S-lKX$kVbpW*a9MDOuN-efUiA+K2%|Cdbyv4h@7Zci^M;QiyBV7ghj6@lL zf`VMObPdlQ(o-ls1r02EB#HV7w^!T|w-vR|UQX|_+xp$}ail4M%T6HK?3Z1R$kd)$ z>m$SVr*P8u`A;H~hhO1eE?4#Bme`HpMl9f;eod(X-<;88{&g|YMCm^DZDJ?LoJBI> z{FZ#}=fZHS&KBC?oJ11wSt(A?O z!)faa)6|a{A3m5db9&5f8OKrxryADv1>2xy6aO-CTfuDQIW$)u;8E%uWF`~KSY!Op z`k8mPo4V)P&YAVCH!EQ(JU3Eh|Kma7(iN?1;hfj5h09LSwovf9PaAA)c7tZbx1PqQ zb9dc;WSM9_i#W(M+(`#tGJFYMn==|eV(Pk9y+jmzBy1>O_`|Qd;a*pQLw*i8^_9eI zK`WA;RmxW_z(i#HWnZ4$m37+AX6}NX!`liZnlsmqsu7{_oyl+l2ex7jO0_a6!#s}M@<&BhcKkbut{Rk6< zMTFjG{BwLT>RHuE)UVujyCHWbWsa?{)g4<#VGr~h?vc~pTYHh$nW;ljpa+$x&We28 z&CP8fX9@^4jn8-w4}gT|=jGE>=9vd3e^@6n@PE;C&0%%E|39pjZ5t=swr$&9SjKWK z+gi13+qUiHTHadyp1z;!cdqqU*R^xb^W69Q#ruguDwekl+<3NEa-6>IPTVx?(9Da1 z!$>$4b{U)1@6HWB$-B-#*vk-1(p9JYeOJPM`ll`N%j=HwMMM$tbLfqO(_PjBR+iEtco7hVqO__gC 
zX9iyG&gk7Jp3_45CJtg0$>OFUS5oD$3^Li3w zz`<>6O}MWH0axUC=H(QE??>!|Tvmv>WOny#Vt+)MPyn)#Op$_8U)#ZZr(3HcrI006mCw6j@U?! z2eY?5*D&wm46LqyNdxn45HEipRxds^bFwE4)6rP>QC1MjPB%PlHU+lnWA(^S?x#iS z_-FF@w0jW7P>RRI?OU1s}L2Qd4jm-&74WH^~OyWjr&;Y zEg=F;#)fPxvcXA5oX|+Jru+oSiC43^(y_sG8R9KIegA>>yt*sn>w81tjfXe4u{&fD ztB^ip!2*0UXVxIL=!$4Fj!`-=o4VTM0XM3lmK{3>y62Y3->h#Y`rk?47W?V_yO{N6 zEgtKaIVDn6XyCdm(Bi3w?hU-s_Se{v7$!~VvxWgIz?rgyyGX>qz_=JWB4RS^Fj4F4 z2?+2e==qsHF4CkQp7T7d-Wdoyw8;Pw@rOCCk^|ut47P_m;tw^8repfgxxn2agq+zo zV(frstYjqUu6980<`k4>WNfpwb>0{j&H8x(d?w4-EovwzP+<_tw(lR5T_%b_IvN^` zkq*A*-R0)mz4yn2yOA-dlmz+qK>hhK^~_(axI+5J{qj6dED)aqo4mG)f-j5Bq$1c)KK_Z zZ&vE6Ly(>`7|w;=qM)K&8!TuXI4cmcpUd51+=QaInj>5*x%zh}PfvTD z2d8&d9@F$u?gU*Lm><3Y(BqUdl(EqwW^3$Nsjg7-E*$L?F&d4F*o=IZ zIhv)|m*?Z0cf;9~Uh+)j#E02B^80gWZK>y73{bEY)aVUAmX-MULsh?xsKOq?fCOhK z(V@&UCh#(qAuH>;143PUM>%C^2EnvR2f@Ck;7CNMC_5u@fc@$4C{$bl?*dk$5n_(E z=ZXo}Bt2yN*08KE*v+JoLMor=EP_FhPpt*LV`8*jhEr9riHCadAi_D@Dr&8JQ0x_&Mz!Zn8$v&P4 z|NK{(Ht4c5^)H;7_(X|TWlgf#*D^X50bU8N#)>+>_vy-}w-tNIOZ73)KU@mUT ztrC+N3L)v#BC_K9ysy5vOQnMUUiNbmx}$%y7~ya}bp`oAUj8u8upJxdtSp(Dt^bXW zKJa`S!%-O13y_@7+BVD@gv#oQm$S1QzPIMW!2(lf&^wAN$8KJ;frGRtPnzN$fc?#( zG(|(*x1<;N#Qk!5ocS{+QnlN$!hQm-6F}}HzbnlelsBbhG9CD7;ha_-m#^FuFBAkN zv6q$@t^)8TD0u72s?d%f%DMJ-VnsIdy>K30nYCgqJOYA3X40uVnXfV+q<^8$R2iTv+eow?!sAmwR!;9s#b9t?u-}2$^X#z$6va*sw74OiGO!Lx zgcWApMz*(>q5@y2;`t3Tk75;e>;ObM;cw!?~X0z=HkY>fM|yqksOPmxl&B zwE>@qPry(Ob5)f&DgkD)SdENW1Ja zvSKV5UhOh>IaJCaIht>_)!eo^sG#Jnr_DtJt{zNZF@k!=B;!%pFTU_VtyOMa|gai$&23kp{z8oCK}q zg9@`o?OnP9F(@GQdq&L56Hj#kz1J3OhdSBs7zp=62PRCCZ9AXg$|v9Q{Im9e5K82B zYPYqm9my|$?>Ro&qQk%dfy*wSQ6d+OvckdHeGL+%((0_0W^2YYx=m8hXt$W+s zSwv$`j{B#Rus>aMYz05c%EjJP*!*KyopcBWk)vsKVO9U;T8!#*6#3YXooNK$FniM;VY06%)w? z7O&aPnns{`k=V%q(;)!-qZ`AQh*DA3{@dzqM#=&SCfI0$|M7F)veRQjx7}lN{xhKC z;ML`|K_tx?fTAtt4XY%o`5veBRh`)-D6pQTdhn6Q&C&K$Y_8An{s$ckW?t88C)z!B zm}aM?=L-f5wd-Gql#a5!Va? 
z^9Cy|jX75ad(|Wh^W`4U%}Okmh2XBcG5h#$iAVEO|3(Jx z!}IZD1)^DRxjl4CHh)CHm#_Fu7Pr1e`NC^3ffo1-p-sp|+AXSgNI$Skp2j0q;_{{@ zIH=%En-HQBEiyX|=_$sUuz%*~E91TM2*vJAng(Gmc~GfU^1mCNrEO z;-Ft&O{Jo!@Rk=;nlV1PCEuS<6Ii7dma17tbYfJ&m6G^y~p z#G4q5#7ekqYu?yV$|B5HORM0IsmE8`JiQ`=K)6RTybF@JSlrbE-P|;7oL_fM)vIdb zlkUbIqPncwA|?U{M^%^4UeBxp@0Y-?_4T@GWw?7Sg*Cb2>a2Q(D-iqh1IsKWiW;<8 zVz(k_Sbn06G$^`N#uBlldv@j`QG`5`x#|QVdTLHhq%>I#s+2&;tO0m{zK7>U2JbLRLUzVV&j1|s4kHlcghkW&yMF93)nH{_~5dR z;cS{QLYcBObTaVq8rpM(Gv{0yybRQTD0TuaTL&bT=S)$)u^^Sz`Ern2%@4Gf6wEot zmZiQ9P3o37*(UC;8z`*$Vc>=7P z3v)!zy;(_%;{_mpimy4LkQ-TmlwA)LzWsMvR~R&vF+}0d$`$%v(BSmv=J@h?XpTBV05qGJxL4Y{P%i zu+=L-Frt|f<>d`qa~7$a0>pQj@@mHw%3p3JB}0FFZ++lBGTXfV$I3T)x9)M?!29P0 zW#qTPK=zx2KJ@3~!P@(4j+Nzt2@)0#(fBdV55Sr|d1yF1L!MLwFgs1L@%}|u<}d}= zP90&2vaylSe8rf6s#<3AAK3|0@^~m*&ZoHttSv!t`ANJzecn*=G4Y_PKKLxio0TQA z;o?BEu(T|U>u#-L8ttmTUX*zuxQ`AyApjV%q|xQr&65Kp<#q^4tYphNzl%o4(y6v~ z>43KQ9-DGuZz8*AhLA}3qow9U>m%Vw129=q{aI_tF0Gap9fgCV z82j)X{wkdEaBx`68B_UB8F}^o2U~=CH%NqYM}HR%yq?y`Qv(hv4#d3tHjW|m3Oyax zgS|Lo!=nfk1A5%aw@Z_jAOr7zqjhr|{<*nNx$T3fmk+N1&%17exW?g*S)5NiU0vqI ze)Bj_v96*+(CyafH{1iP)(;V+n=?M~m(yV=V@O>^f`EjHu#y@Fy!Uj70{CU-_rC#2 zsOI@PQb$wQovI37GP!j4!9$;r6q!B+us3u zk%1*qpsA_VY2pa#Cnwm=D0^IAjWO~kQmP9_?K*|CPLhpE`y^l$W5^z8+ZF`UF926F zq6M=jKK`V#rko{NWrm)KPqVf*%8csMD!w1MYQp7LL$j|BZi&UDC7>c@xH(}yB+3)i zvZ505e#f{*gzKR;vH3A9)QT~xr@x*4m;Dtvj-#fM|$%wuEzV!QR|0?T_Ax|6}NN8^vI@N9{T4r03V`kzCon zlI=HhGs+976zaJ32uuAE7`oi!9lHB6{8_1t&%kE?I`5EAsLL(OOb)Mt zlGMZu7C7RuB~0eT9b3C026~eSA;jK>{R-atBCIXabbC3>u? 
zV+HKNh*NMKwTT}X+k`Xmm?7=54la?N^A-YMjd zG=P;zEtHqVDlglU--h|#cxfv(ga_uO;)b%*eqpO`)5|TCo;^U0b%-s*;`@_?^q{(Y zXvd)q0!R_|sw(Jk(91mcm^htAsCi?NuRW3T7`DR?9i>_&A<8~%N<@$|()*urvB|N4{A3MF)ve$j5zad`F^>&C|tkey- zuSI=3#ma(!mjnn|00@6wpAB_i`Z(Lq#IE)n*!mjZac*8XiFHiG_I-f^%r2Ab$WrHx zjDc3S8*ZX>fT3>FFoniHPL zBC4ldi8qp5ri{+J#pV4OMCm0kW|E9bZ-?2|iu2EGX0jST;85Y+8;TETN2w=HQ^7u4 zQadwe#5XIUm|0N%Am7Ha2}acTqXl66{_ru_)oI)2P4-_eQ)nkXH)&mtX*eQI8&29!r6X956xFO9TI z?_Yx|o-W92gVxjkQlk4uRGa%j0ycY&;07B0_8k&zd;dXUwEz+2$epCv z8Z8p&KU`O5;=Q^*DZXB-h09Xu3Uz+%D9AbJSFJAQmFEC9mf&ZBvQG$pF7L#{(6=4X znvgv!>tRMzoS1{&qNj&H;bVDr9-%KE9W83C8GOBY&*gnb*Nhi2Sq?f5$JK*zaA|*TNkn^b~)bd zBDPHy{&T)RNZC2H>DQ-U6vWd$HlJ@pyz|PV*wvTBK_b=0Z}7Jd8dQiK%hi)i|71(4!F~0}TC~Pq#r%ME>OY|oW?u1;Kyf&K5=x;to7mM; zBli+<+w=6X+px@*W=M|7`jCz1ZT+=%7Yk^%QE+zn9yG5+veQKiOC()S;t_-MwHxFX zL1Sasu6JMmzY8#c(M1Om0hXNKD4@w6flQ+Jdxb(0DgK#(L?pg&Zpq(NHCNcUF?;B1 zK2MA%r?z?KNY}4LE6J4wLthv}TC#cVW#%MzbDwqeuO}vM&*s$i(pn-|e?89~6qIvc z5z$tI$eCM`2dYYQ`^3@=qVvc8*;iH#Xi(J$LBvhQ9Q-@^u9Hef1n9Mi^3uA1k)<1a zZ7l~6E*y`(tdD2`cvl3W+=MhojZrbDtYYdxu-<}D9w4x#lOdLfa=`+H=?FzYiyn$^ zZX1uPTIGs9kS0u+>xdhbCLpiyFm!G%l6?`tH5Hc5XWiM^U&)To%W#dpqcO4 zeEJmP-g1DK6ufnEaKda8^s0>b*NcPyGq~-owjC~c;_2rkx97*jG82>EOapv3 zug99cw!aXd=f^ZcH|yk~>mC`!LkPp%Vy#%8Uw^fwoF65k0Xw7UF!zT9`>bO+T?1=% zd9z}>S3%c~R_bW@sR(6|vW$$rXmBmm!J#KjXqL7VvW~;3bb_QUe-Yi^Oz%HFlM)xA z=$}IA#i-)+y_zL$&ooC=`)Iu-JN-9>Jcw#obWSFZ7Xsuo-Do{@m?Pa!exxE6n=(9H zxEa0DztP}`nmH8I|0`|xzWrN;f7=u(^6ERCJ{#aBhrhiv=OA4T_4|w?V1&d#6OLL9 z{&{;6$8b*9i`dCwBZd&jOb$nB31l+!EUF!Chd*;U47pSJEPIw7{22Fq! 
zyD|v-hDl=NB+JOK_wvmW1q@i*m3mycd(OHt_#xuJ@_IhWz3unNLoefDlA|lXk2#Vj8eqlb zpR_09!buqI?(ng?f z&y1yl_6fl{TX`a|ui^Hs+|<%Xf3T!Z<+oKM^?7r(ZxN~agwEOHUDt_l{806>_do-_ z%Xm_lyYjs(js# z6xU*Ha<2^vV$xN=arY*!Nj)JUfk7?s#_o4S&ZSdz))SasWjzMK7@)ti%$l8TC5T)t zjeA;=xcBVrCd@hggjp?nu1e_D^z)w{Z}*buG;=&?zFRM0nakuusO$KO{Pc3?5dgR~u^& z;%!qcc!CCM=D4M^(JR6BN?plrKt)w~p8|u(<|3LnS=zvyI)s!eEc^1Zsh$|R2RnO8 zW!~o3H}NGsVfamfFL)~)O*q>F*z7I)d{Zm?z&9mOVA%ZvioDCfXx}9Iuhe76UsFEY zEo}^30P3*H8lR&{p^J@Xd|-mN)^j}oe8b|6bK(6hE|~OHWmcPG6fRMYN8j%kw#0i@ z9CGd;K-ht-3&;}(uA7?o)(-grd6=MQDETa^FrcO<9R?l97A@m@=xZZk87lWVm_~+l zLSOx~WYuGS*@v}z5 zJT6cT2>BIRdQ%PfE2TYC%KZv)0POj*^998r8}9WafEc z7K@f6L!2?ZMgd{3doCzyj8cp3UQ-4P<+h3J!gltnRH;@X9FPF!>ZhxH!vPv7&`Wn1 zt=ilr<4PdzxKn*-Cti1yolTb}n}m~4w6S*}&~y~?TUDj@;@Hapvd;=VPy9cY@ZZk1 zU|u%EICCYnwa529W<%vSqi4I|ECQ{RNa%G`M2~oTm}QVDm8PjC3gS0yyE6Mi9f}Ka zdrH1swN6whD4mG!@3Nw6%(RuC^;Z@}ZM$hdUjZpfYMA6}cVvou*>-G}-XuJQE`W(% z6Xjy6cYJMeS_^KoKO4VgR?!7Ybo#5Y_KP=vsr336O7=30b~Xv-Jo8J(_q|_`0z)O6fld7bq)blXA?~jRJ#$P80>1Ydg(G4EEC^a{!>-Gg8tHdge@AT1D}4& z2`MOAt`OovkBOF(1ng$$nWxib!17zvYMb}@nR8W3$uIxv5yF$WgE<|dgtA1OiQ}xO z9)Qp<=q^4OzK?I)Vge4ZVZa$~d8SI+Z)C|6eMRyH11~a`5WZ>Rkr8=%GVom>qOGnK z-o}?@`YOoJdy=N2CdF#uTw~wre6LN639RZgtBo5 ziW&~QWZX~!a1c!7#Lyw!AFQh@MqX91)Yj;yOVqrqZ9m0L%9GfkyX*rg?lsR}0>LBK z14;&bX9YF3W}nPim1_|@KLNnTO&_yhNtxIcIm6DOB#JcDV0@sjg_PWs+dDYD7V%0JBzyF8%0jfqP+fb=jl*TB2LnyGg^o%_+HD!9ZdYPeK(?u2yNFH zgLg&hZ4 z%sSy7p;bⅇ-?nC!6Cry5iA(BzS7DUw1EBFmNRpx{a&&2vzaYatU0GyJh1M+`^Ce zq)8Cz<3dA=N0l`DVC?e@Dfwbet@FVZ#ct*jxZf=cVQP6}c$H2rSYx{9Ae5_FcPg0DJ;HDFmUs1`h$& zbDuVrWJB@$=bGD4hO`Z|mRD~Rq`Dw-oRl*BwTysSF+H=gR3FBhnI!UG&Kx)(GTk9* zOVk>^hsMQ!nzh7r%E-Ctc?CyZjAfNVG^7ZYpf{RQ#DEx(-`p?NVL zcjSI;>%dq^cHu}eWfQ*G85}HX0YJ^Lo)YmekW>zCY;?$LNKf7rt-<=*|Dp(67!D0r zt*hSp`>bo1wTB{ASw_g8kS=ontoFgr)JKHkwMt(uMsa=R(jfP%A2v8CU8-1VB;ja&9GVrsl0#nYz|HQ*sXoRHVYrwQ3Q)r8mq-h4KK;oUq&JkH za8)zYC3zPh)J+eG%g3WLHO1}E*v69&bB4Y#HNE6e6aG3(fUm))j`!EMir$s1SBn7U zTP52m1yHksyWUz1ar7*@zy1iog2qsNU7y*pC@RxF8kj)rY+k|A!IG&n_|ALF{#I!Q 
zzrBtn$q*EiX<#@E6lC|hpLOVEIA2rMmQstlbzwKzcmtE7VSTOdJR0?!Gf_V9C66lX z_UZO6eR{2e2HnGF_SCj4^Ql2K|FC|Dd%5!)hfd``47zr}<}|pyf>=wK52V1S_G}k8 z?1EGNXbS+{K~Nyf&;BcDpm3%drlQ=+KD*O|9&##C9)g*M8DQ)YOfOWf?8ludyX#J! z*9R1$yuRiW_H3ci+h%bm>J0YCfBz`kNHQh(Q8aYM{5?3i={YQr&!2XNqE%VNY%EZ!ahlU{N$v|Jsh_ z2Pj)Y!uT_ULXTdE&O$JE`X;kgr1G!8=N%WraQGN~O@KASiUP6?%?9F{gbWv7H`7l_ z43n8}o`98YT9BJK_T``8+k7fwYxO7CReNG1yV;S4qBR7-?cqfAl z4yeqc{sKH{K4>NB*u73{g1^PDo3BrqY^h?Y0H}I7+hU*wYednIbnBW<0)CFf{_)`8 z%T9g|HN}$naEFDIzWrxZ1B|rtpUk?V%g9#4K!2F!RfObQ)<66kQ6sy#O-Qh8dCI!2 zg@~+NABX|mV6ylXCEkV<^fthd@qT6}${-q{gT!}f=s_gJdJHMJC;OmGK&s43PJOG!BT;`Az5A>=4O}z? zsH+clPN;6L-IBlpPyRw!S9p>t8^xuzdk?bVW*h4F3iE1UCN?nnU1?FiXT4P$`rd4^ zl<(w>TQd9g`TU8#bpwV}^s+ZLfO@k)3Jy3HR`aw7^i)901eL64HWAoFcX6*P+i0 z5@DOBrAK^gVDz`V6%+@bqM%NRjMVst93Td_vc&+q0o-1Ir!+2i?ABCBxzdKSMbl>M z$?rXzetg5#D;o+_i&hW@7!$JE84u9s+*5W%BF5(poe(hm(k^hbvNB+F(*GGq%($q- zT$p*nPxY~^u+_Lz_gVb!Up}(|Y>rZf+B;nbvj=cPQ-_H++zvvU0t~$!cgao%$5&tU z9Vr#p=Vv2&&N>kQA!e8R7?}dBo35aw^h?>iS{>=n;=jk^hst_jgxzmMd%l&&a97u z3vI!t<&?ezi%-43^ITcL;$Xgd6l#w>@@#YqG`$E); z=C`8y4Qb@F1E|s@0#+=5r#o8R5Q@eyJxZRs^dSM4B;i7Kh{&XgM26v;t(iBR;X2yj z0(=8haXR3%&>T_qY4GGHYBc6w#i~!xA>ooQ&xQENPp99?;{o2ofIv!7;6Rh!USbB# ztuQ+Z89Fn!SyN9RDGn@#&yQoblV}&AEP~YUBRu9vwv54I+s^aBk*uj2=1m1Mk#en{ zFY1hWnfX+b;~%2YR&Z_&CsWj>9~DKK3l610LFCw@%!Os|A*U>=xvZ`t3gM!Kfbk|V zNSC*BsEt`eqs{h+1UfwgsCW)`^$U;~6cbQVW21TajGAWvTjrku-wJ^am8$5!IRs$- z%e6_zOM@)_tw`1GEElRGN~1c#XR+%~;8o=VuUbYF44nuisxH*WT@&rHOW{MMrE|7k<2p-XI=4)L^rn#xp!dihB%6v zm^_WrR92>88s!WCPNL@#NK%{l0^H^^4jgZ`qbrN%h515xyH+eC3?6AO6ptFP{Z5}UHwtDbO;61fGMYY`+=1rgP}I#w z**uLsq&`znJ$)1n(YKloC-=J`HgU)=8%&I3+gPrzgOo3WyobpzjrqBkI@cYD3h0YH zS~SmEs#W5Asd9`4(I`*)ST9fCx)~*Xt0LrR)Lotk{|VyG&>u7GQ5>gAltku007UrX zCMpYup^B#iv2JY?Oj4Q}qhi=sV^{G66O5k_f158o{-A;2s@dt%Xwcyw;!jdw@BeEC zv%+;@ql?$eoNDxlC0}bO*G5^_7McEDU$0YZt0ae=>k`p@;@h=1l{F>~sg%Z*`j>S{ zQXN5-l~PC|Oq~mpj+tFqxtIK@Lx*$$j$1|+;3M-u)fd_CqIqlnyo%yPfKYdU`^Jx>`m0-*$-;WC+2AikUPJ|Q?cM3nL!jX?8_ 
zKmLb=S%aK|H7kiqm2`<9)J)PG6##C?5gzv^!j4ga&7IvqbUM{#6mZ$i;fxt1cq*iF z*&gbcl!%g|AdgmCJH!(@34*$T%*LLI6|6*
H-X&IZ(dbiJ{hMAWhnoBbDL;AJN? zxP{+RcSz?0F37p}@Ag5x{K&@`fqGE~aIkUK~a#!$D+6lO8D)4y_Jg5WYl4T~mH)baV;v zv>AvEUTpQ#nN%-?HOqmOd2<Uj%EQV2w!O zo_Km_KO-X8fOmh>lsf`Wp zpq{~zckd<}LFHQOL4|9Em_;ERK>cN7}%r_DWy=-)8rh1Nbp7 zX&*qoeoJjHB13Cslgc^BnsV-q=LI{|T0eq(`1SSK#o{Ht?o1UM1Y(q!(fYmc1Y>&s zD2g;THWuP}a)P!|=}J3W3A()?!Y z;^nBnL)A}qA!UsH7FJefRdp7Z3~O2gMiL=ei;PN=Mdh=c2kpkPOuN2=WB1oXCTfKyjm22a$85<7Hf6<8m^!b_&bmW;DGjbbb+ZK zA?Q6eU?By=ioMD#3J$odEaoYnKm2PTVTXT>F7u(0ag{?8PTz(;2p%yiNw})W(y(`# zIMTBJK>sm0!PgdrSPpYmn0S0|1G2vJ1p*hbv_>#4A;?L`f})hWT=55Da}j#h?p(8F&J6#C!jW^e5X*%ocUVv^ktV7K0w|61 zVfu_~zL4-rv)x%Ej5|2_36?)o^1nwCg|UNqC71;`)-YiD0oKqo;ASO_na!VB{PhAy zu)`nM-~*=g1l~}T0S4&|=BfWfziDi-P_NZ%&TWu4TJBpN9o%k&btUSV%j*!q*J6kY zj3`xk@iv#FGV1GBntGLdJbG+wQJ$GX((u9kc4D(Sl5&?|L5KG>arIc@xed!amyOX}$9gR_4u``>FMs_e$lC+eJFH4?6`rUOUE zQ!1CW4*SUl%iV}zWX{Df*?5!vPt1BloZw8-#!B2=y^XHJ<<3FSO=RhunhNq2a83W} zV{*+uHw?Y3eGw@0$8Ey=C4B22@97o9g2t_fmdeoayjxczmAHeZTel9IFj)N;(LWRI zA2UvAhEi3C*p~DrSvUx#N%$(cW{e?mR5a_(&nNzzX z1T6%Ft-4lWyd22g!bfC#kj*=i2w9-9&fI!9zfMs4xE+$Q*7O%?iGKYcGKQRp8sq8=@Q6cdI-=-M`~Tv)GS!*|1VC%t*oD zj*do2!GZG<2lU+e_7|1CZ#rU)bU>+z)pRd!v7s3WWa0hp$Dghj5@2xP9Lb)`O{ZQo z{h5i8V0tnCUDYoDG$Q_A{9x3e&di&nfAA21MSnmN3P}fZvB{uu$e2Og@2bOP&<+Oq z^zMYsbi$IaYyoRUT7V4DYd0xpGOi=GM?onkYpNHUr<}`3f){W+mFBP8pC5y^SH(egWi@rGrWM5~E09G+`tj4OzyEj2C-*Z_}$xtQ;Ju zz1kx7!@g3MOwJ;IeYtNVE7t`q!LXoVvbUev;*1rKA+E2+L+L>ZW-b$wn^=YEWVORR zW8)o1+%{(cLB*<81$-i}edo|nz%hP5t8*`Ao%88dVkLJ&tcio)ndZ-T)re&y%q}A`%CoBw8uOqa`Sp9y~4LNgqta1M_q$7k3XOX{8;5PW`G=8u_54Gv$ z($xH($}n@v(8Qz%zaB}x`ki84zFC#aK8D~u>$hBGrfoHh&iA#uySoH~`}8y2RmWSS zQfGVftv&(EX#DbaFT@lx%Xq=>%L1h==si>Ao5FpBr$y!EhS&Z1(uo{!$h)ukokzH? 
zFn78_;#01>W&{Gw)v-A@hm97_WQOZUqjc_g$MaaFZ{GR_5E;Z&Dfk9xcQXeJ*Y+}Z zKRuLkqHw2;ZX+{@yvV3KvZtw}aNobOfv(0}q@m&5N9rwwSxeu@`#SMP>R<~>v7%9U z^|Qi)WgTnE1Lj%gV}@eVP;)+vhacO6Pt;uXL1es=hQZ&4)DS7o+8|sA|pM zu78by`Xu#JR2*LZbmhR)smg8Li^;JLUr<-6x1`O6){`lh80In=h-YeI>0{PDGQr~U zHdj>B{n`MYJ!c%zwh+=2dV7+6oc*@JI-y&&(wT^WO<#9*%dDydd|MMae+ZOJJ4mrERSv z>t?o!R@sw+PcAnODvbyd+3r}mNn*9K_py0uvz8Z3dP?-*+5`R7ED!qSZZry>8GXTe zMDh9?X;x8bF5c=Q!_hUz`Mv26dlTaYipldHiBGavdCK-%Jl(a2s`W4tPl(_>r_zalyeMK@SZ#o4>Q+pbDaKG^s`i&YjuXcL~ngerM@yx-RWqx&*Jb(kxtp@ z{xknWB&7D7x+?LAsemgxqWWWN%EgPq#0M<(cXMlJ$Ez=1ai2dbrpwarV$~l=@=j@O zc*&q%YmOUTsNp|)rrR<4+Hr>qo zeyqV1l&4!S+a@O`Q|*?&VQr0m{I%SP8OwF)0O?QS@5_|AZ~LUqVz5*F<2v_J&*Sb! zmBT!An&s&HxO0Af|J36SLd#0NsF{MmUp&Dt5}!vLZ_|reg3x0sR74KEN$y+31<$O1J%?*SS{}WxsxTyqvh4`&l2l}+GlIf;#M zJlRMYaw8c&hWv0U9Rq_+A|@tgzcban!##|Gd=OT0t7I0(?6LRA`E;|l{@MY~_-!Ey zF1?b@!B?hV;r#f!Q}#<~E$vlK+dRi>11lq&IIoQNM@rZG-{zzfO;=dxjwNJ^Zg1|a zv;ZG4QE!BV+pTPP5<^yPxoBFp?h_S-Yo=!RLv2*;0_mdE^zsdl-OlTtv5>wbvdsSS zRFS}8V2G?n{VkWrMltM0gbnWQ^8KInIx&a$gP1O>gg?68x=s}9p`r#84#7>YO~p)w-(D#u3AlVxQ7V)bGM}sI0;#do7o5RDl1QR=Tw|#&ul3>J?k))61GK&B zl%cf+p`^U}W&Cc}XjC*@{_vNm-=4WqjTNW|BsU78Z?VID*epTFQ?2}6G#{ME)Dh|i zCW1s19qwd_;re9|*yQ$ir)4lHzq4^kUq?Zw$#^0B0@-8FRbH)ST%0pRnkE`GiC67s6Dy5;FnW@NoYLrCSv)7chnAXCm&8{T05APWEy0sj%_ zUEIr1kNa7pAs1{M1E>L1VI7X9_3yJT1uYFN;w$VwF+7;B2t@xp*U-UfF+RfJKOG}Z z5ES~>n4GZU>#$-2Fhw);t_z+pYD8|27naygz5(;?QyFRLx@?dX%y3puK{Khb{)_{o4 z;P0zDJKbFoV(6W;a!GaXbJZeb)W;qOtDDF0RSWzuStm`G*!{J~KOM{Nk~6g4$P%86 z+QI5u?6$wA6pGiU7D4Tx#i-fvE2dcR=;H9X=XhrZ5t-ZjNcBqdBImwGzB9He-XOceF_Io(7*Fi)u?sHrKis|9;hvnUOBJ9VR>a~ z`S==hN9Ul)LWZg1Z%v=aZbP&?G8?T&Q}g%H0%*XoAO8!3l;_p9Wt!O+A0Obx zP{4P0U81f_k11jo2(y#~M?~P5P21|Xv{{{OjA9WHsplw`n{MRJR9lz2u43~zm=oTR|)g3jc_$SVt^_c=fR1pxPLLwbIjXHM!sqSWp%AN*8?P^`5^ zwM2h)I>lSsmk61^|Jk!=wwrUlX`L9ceAI*-7Y{irtk%)k$N@6=M31hc5&aH2Tg+Zz8Vdefzs!14!=~T&Q zi$>B=AxS@|w#QmQ)~{eQ!5zYl;Xh_e@Y@S6m|>2hS2UVF>nq$xAjR54n8zf+)tRWa%dKS5KX}`^4X$G0EI)o{raTe! 
z1*DvC5n0*pjwGJ6VY%tFxUk=N3f9Y4z9$45L%Gk{3}rHft_voneEMGy%wFp5OgvcW zQT7BzO^BYBIc2)zj8@cHv+s%hM6jLbdtii!%?^W89Fe*2hEO*QM~ zHTLfBv_|$|xo4`;!VyD#xvT;v-%;|7C(IEcM(U)d0sn*}NSWK)n~={bh=@e*4%pV25;CMJIr6khy$XHNT1VLvhn(A9dfRS{z+;9P zxegT)m99xsN_{sy+2`~Tk-{I$aBY2Elt&pKU+n*N#H!POli179bBcOu8 z#3a`^i-S$kJSL8P`(WJ1>V#}0qRt6*@Thd~HU@ZR`J}FGC5z_(r23wVx-$wpc%V1# zlMuffs{2T9*q0;;R={fORdSMP2P9+%jsC%E6O%iw`h?H7jV(h%EN{Rq*lzJdc*JIU z5e-bXbIa9VHf|()b2N+IW=+-zco8nAEiRJUvXzrq95B(bm~VFiTz8sn8Ie7LdF;&k z`qVk0Tu*H!Uns24#@ZNx-gnGdpm2QRu+?`LxE9Z-s4kAZnewO~;v}iWBqYV9rD-nX zS`4aXzA5ve!IpV1W;r`%>RsYJAs*KH0|qMgm)#=Pu~ohli$UYz21oCEB@YK4edUv3 z4H`Q?K3l(7Le-dc>s)beXlQtdUYP)iiMF0JMKs2MyuA_e_32FnkxeHM87`)cYl4LE0G@4VSg_4 zpaE>-^=>Sm&uofwwLNH^&+W>Gq_$!SK(o$JvS(6j433Tht>fe4rnd?p63c#PusN~G z27blm`GR{O?h6Uaw}$1`zC*QGgYZh;2`cise#F)7Gfi0%;{P2108U-=S! z0W;1ri8Dm-Rrh zU~8oi65Q8wZ+J&1CjwFoc9RjEt`Y=aJr?sDD7@DF?~hyTHk}7!yF|Xn*4LqZBY9eW zN2~Ouw7(x)WS)UV$f!?CZjD0DoNSd)rQDeoykGA;DUoGLIefJ{3JDW4OeNX{01^A0 z8Tcl!Opwo@wlt(*Cl;O0)*D^9-!$ywE*8?;^e6I~Rn5LF=-`6dzWrCBwceHuLt!H2WUz>~!7z;7- zEtT@6brb_#>qxPGkIiMs<9At-eR9t|R`*AH(AO8PQ_*lFfuEo<)&2+N(6kDR5p)n6 zKzr);!Oo(=f1YTtSL+13nNuQlB6WI4J16*=Z%VK!C2<}v40J0>!Eren3xcMkDN21c zzAhRUFyA4pRBOl&+=3aG04iQzifBm96T0_4rABfhsvawHDI_Am)?v{|S;0pu@y!p= zRmR|u5cJ6s10!Jh#f%Q8akvSMmUzhZjpv)}o=&KSR=~hYNglm$WAvFnXVPx?(DXy= zm9bdc`anIc#&kD1%0F6wD&)tfRj_#s=e@VIyBm_0h69ly?}{{)=SFms%&X}{mb zjfA3QBX}pw>sgz=5zf>|n0R(-T=Rnnvpzh`= z;%N+cxOwyd$+m>xA*SNKFkhdtTOs$_v(Y2+oM3rp<;@3ALDHlFv>sA{8nR~#d>)50 z2NoV4f_!}Z;>nC|!SKM$?d+^5nVFxyacl8S_8!zRjpsCz5Jz@&bSC`H%U$y}TNigr zByRrqWLLZKWd{Jl2?+_8TRl2hW9KKX$35mNyJQy*Om5DY6lPPlRZ&*!ZCz<~uBX`e zM#|Q!y?ubrAC>Pz_mb>keJ|f zb;80?QQ_$1M{0p@G>&YB;!zOe46uUh;gX2YI4QroXKs*5@L2%mcm#Jx!RPDq{pGE1 zV;fhQXNrzOjU=x@bZgj~@B}o|6?7`K&~ss8UU!?LudA;o*HB)2==YH5jsz$TQ~a#N z#gUbi)(W5L!NZ5lKjaf8<2-TVCyz8LzuNm3wJZG(X^`k4KF%aVXyx^M-Rmyh7()C zmqnRO@4V@t=Xa(I{%XT92ok{l5PXF#yyL8nh}{|5+DO)Cv3%FrEOgyh#ue$eUW%N? 
z{X(g1@J3~@GaYvsjyZOx8{^%s9rR4} zgp%jHL14rycx%D+y7~3QfzDj*v_S|_@@HR>;|9QrPzcwAmGrET*c zJ4%9;B8Ag3vP4UubM8(?6}(eLefoomiAh!0#B>(q4bvy}T752VZX$Dkg5O4gMfE3} ztb})o464?!$#ufDB*T@NYO+0SBv)Ks9`rUjn1FS0aZ{%%TP9v09PFTa)TcZ@Z1GAC zV7tQu$8N!GWwy=JnYhK(u88p^XAuWdzGKI3Ks!v-T11OXqda;9()Y9PbKiM~3D6mC zxNN-`0Zqibyg9ViXaP;p{-mI(<&hBqiE#2SliOXydgHIauA<~V zcL&nDxd-11RfMxblNHjOK?>_8=6gJ&*~z)rzF%nSdx=Wq>6uaoYLFE0mpD#yj6M%6 z()M49s5YUY`ePp~9jkFw7S&N!VMjrdHMRs?9s;&^(h37_6*}Ql8_7D`&PdcJDm^&8 zk=$DWW0&SyyVm&TFOr2VJ&`&~fGK9}t^EP8?t_0!<4_8mv8MUfSKjw?TZE3TTJo=8 zyY~{w0uR8PX036TW)qLR5^#A_Q?^OYAQjI+B9|o0A68;C@cP&=e{;B!K8%#Vt-G79 z`=y2smCU6#W1@4B#l3lfM)i|iYc|7P(+dZ{C?6akyBV(EJ{4ScPlOu8&k9%!@UuOy zi+u;EURHx%;ZDZ>s&qN^8F2E zS)vsmnQr$?DK!;2*>e*Uq7wbC7NC08b(=fgnhblP&S81@+bL*!ab-msc(&)kqBz9F zJ|QwA7(vD1zn|e2wuA&n(kbT9Jg1ac(j`eI;%&Hqcc1%yYW;;%XgxLpU>NBu6QvDi z2On8^JdT~2Wid<`@dXwx`IR)?NSjJimDXS8v(+LXA1PCS1AQzWINqFQow4Qmc&~Y9 zdt2`Up=EqL8ib+00V@YWBui^+!<`txYz-FMm_kr?xA2uNO2^H@f=R31S>(Dx)6z~x z4CIN>hKnvQE-Wt&-~i=DtWHD(k~s?f^>2Kfqv%w9a}+=60=ot0Rk^Ny>q=@Wbk)+e zun2>NY?Y$}1JQt88_GCd2VwF8pvyMo8xI%#zIYB2-L#5f0wUwl>(9-$`*=J|i@q}3 zKi@W#JnMxKA1EHOo!z~zHjA-5m>gzWazSfm;!$+y03x64v3ruvHZWJ$C)QTT7ibs= z{ECPM-s6Gg$;Pvt-Q8@}dJG7e>(?;VVSIw_;1@RQZT*_l#zT4RAAfUwV-quv<1{mH zEX+P`9kDkvGmYUkd)6N4ZaVQjxsmMgDY5YgP;0w!ep~6#HB=Z+W0@>nO;_wW!;3Q6YOq@Y7p{>0 z0{{*RsqYDEfC)N1O=$jbMeu1$C8mZGc-NHg|fR^ z`QehgKwD(>_KTZhPU;=#7qeWE<1yDE7v{&c#~4r5$B26q_}N=Fc-FWwt#l^_woCoGeZkSD9dKAt`cLc|(78!Fu=#>~3+rUnLH-|=uC)a_kVs{~m zvrJliQdhPF;>iM6Iyq^b2O~(r~Gi|du=NX}w!*3-cX}a<@Q9(S^{2Zj-?*_Et zRLVZE10HQDdEDly?_{+JJ;<5qZZ4Il+sSk?HQwr%Raz+!V^p zRLhJR5svWncjwhN=g+13lRR+50iW3!ny7x^bh4kP*6?{@;lmnJjKLW!K0I1QO&5>- zCW5LoR3)z<8baFN6P+Ri-??(d=D<&Y<8-}P_!&zuba8)_9+nJ%Tx0As1FCP7o^yF+ zMc-Om1n-Hyvx>UHDrk~W!6JGNZ-c#EPY(9QV+s$e=N>Ab962?X4ks6({kks{)cw+K z{t+6Q{>o3CNSng8BTAF52wqIFP|}y+YS~6W$-A9ks4KegK=(n<%?SYxWMeKrOvKEc z=&w1aW`T9|?-M8Kze7EmR*?5Hr3FEs=bt}H$4rb+*ERik>Wh9Kh}|w#Y&)bjRkqi5 z-*NNUjQVGc;bTuqsrBLQWA~F?Hky-*U^1cef_l|DOLiyg^+i9ddBRY+i{tCL+uECY 
zXIhVtE~}%5^hz(>w++gT=X_yA=kl-prLsiui=0c;ud$DXZO%Bbx=yPqE=pJ$BHaK-wm)*Z?{!mM#P ztyJnv5{UPrc|u!^;JZvBUvxl3hs~1yj7$C9I7Oz5h_v1Q7!f8W=66Q*$NSTyAfu;T zsHJn+U3t&oNCb9Kyh594a2j6W>1MWwW|cm{9(I!;Seu2REfy-4S$bVb0xIWj<__9c z3bcUDhIF)JbJ!XHjfmO9WywIp;s2Ay#`bNq)t212=6a_874Fv8@~(xO!&C$H6-ZY7 zl+Wi7YS_H*-})Lu;iGrPF*!9+;64n0EMsO@Op|3pr9*caMo zdXuHTJ(tf3b8Z%QXprCOR{C4%lV$rOznSY3D2Tu7Lm|@+Z=;3v4okiO_r1W2!eug^ zpME%k0Fv&@o5P(J8Nm$?1j0&o+Jo>dGV=Z(^4>Bi&Td;5Z9*J~;}*f)2@WAZaEIU_ zSn%L35gy9IZ5OM<(*1$PMUa7KUM+H0?M&f2%`zf-qvSJ9OwP4_#;oO8(YJY&8` z#f}?dr5ht>wOQJM4V3}3tN{81kxFA}N7iXgIBAi+B$py$cI2L$8#f87bU9=LM1j47 zBbrYOg-O4qA*oq(EnHHZ6lqSq)(}69Jg=`39T)A*6p6B)qBsxwIpSX*7Nj6|-Gjmg z>%1?NdN^MxfjD}?7`yw`hFI+{80Lw%#G!YDoM#~X{QXBghZZkd9v>1hjYaO8z^bH~?gQB})to!ZCoxj}!@-JRww`Q_0n=IOiVKp0=Ja@pz2g(9b% zb=~X9R@Lq8**a(62YCNB#KlQD&##gHdHDbN|8BVulYd4>mj56{`1RvL%WC5U6Tr%! zhkVZawOOHWNMXU00_B@zy5=tt#m)N*5IO9c;J`6Ns>HbuXc3ssAe|Lov%Sx?O!HcI zv^X#*-w=-s^faO@EN6NDyXXX|-dSOh7}`ppLn?g!?Q!cPXZybkdO++Gl=!z6=wII% zq?=xZl(YQXg9LY#273Mf{UPf4|A~(jE{@gv`0*p;Vh40<$PqlqfjxQu(IXRE*xg85 zz6#L>NfHU8oF`JL)X>t0j`{|F5=}F%`#r#xO~RkL!5A&4*pD9hhA%M}t^KW(dhs0{ zX_Bvj|4{#g>Ej@%4Z_i%m{;)ryP@FKt&f!_-v4IO|Lf*2?XCI!tqnzl9NWZXARejW znyO1kcvH8pp~{6(>qkdP$AX6h22*~ac5J*(r>HKqc*X7N9?9rkn z-Pw5N?=I)7CDeNx)3JRiZB)A;n3Dxnl62#3#dMV@O-#iJXdO7f)$lA}+z#so-=2gVOT_YiU^P}2cHr5m-Q9L*^gvqz+>poVaKkZNQIa9bwJ!(+ zHNCllMTZ)p_KM5(0n5EPcWX|p7l$^Y%~R0zsPBv3o;q$}0!2zweLt!JIreQVnuh!> z;X>U}NVVf}wQKqfq%6Uha;dLcMTjwzOrxbn(q17GB}ub7>x@Efhn+739F~DRYW`&h z-|VLe2;>-TJBW3=!#-sV=jI!C+up{ck?1CJX$@s&e@J$IQJaOM1N5R!V&QgF0$XqJXi%+r%D=Zfmm$Zayw^kclAfLl< zsBat%r3%fmf(jipGS#VXxWkI|sitR~TN0Reb@-7FB9TUAC_Ya@yH@oa_yAl88d6ck_g75mBqO8>q8L^m&Ii5 z8X)zM^H$&OcE;;5#<~`X{v3pykF}=GI%4@gmF95CX2_LL{9LK~4t%L@dsM7!Qr*&z zIwy>FC%}_I7obvSK?_nNmjG5@H=pnh=CjjJ6ArDqp`>Cr%PsdGeDZR>f1GnmisR5* zw>|6=Sf7bgeX{nviov=m6 zcprMqNtH9yV?hvA@MHoz2tg)&zU!hu-xR|B^5sSJl)mS!%eWP0az?#bg0|4Cz4#TH zhpy8coScltCr^zO@?z6D+Z(*@?#$OWHimwh5hgjwvp?RsmL?saJkYE+TT|p)82W{T 
zbZdX@32^=rG2C%-q?75gHMXDKAC5MPW4FYvp$3sl1Nt4`YeS`cdqca@ZkmoNBuiqH zX#t(M&w8~d7P;4egN@^ML3a;MdT3=puXEm1g4EW5J}M1x9_Kw-MosR+#UDRipoqeD zb8|zu67j?({O;)$pJ)rwSzM<)D{^H87YQhk@O)b&KB#0$w?;Zb4Bckd*3kH;s*er# zRk&`uy#5TtbK(15g4$!F=OAg&ADnJSbc7Mc)zt7Tv@0%7e>#0?9fA_U?ncI2%L{^| zn?Rdm`r$c?flF$M;~^i;)zSt5W41n2YvOLXNU0_$&M=CJg z4>l@$@-N7p&o>YtK5e_s-w$iL^VDNd3v-<}^~x2k}_|m&_KIOya`%8WMe`zSQ*g zO1ps+jJCGoLhV@3xg9EvM5>xe#b(xL+d$EGCdWwkx)Abi1sge=RH^8O&V z$pa$2h!KMbpwq#*YU}6-%E;V9SS@wG**s(KC->9~m~9lFU5yH(R;DWefmnUW{Q}eO zC`C2o0Dy@S$q3Rbzx+)EWHoCsDceRtLw8cMj_Z1F2b`1Wq#wj$Ydo_YnGp}jdM`j|gGDssW%dwYRe9wY>2 z)0M5D$}G1&zH;xREE*(8Y0ed5x}8COAkxf#yD)pxv^_rWF-WKdXMPDeyuT!(R%)td z4hRn?hf`xns9V0HgdXHjdn>HpetCF*aH}#9P6}8B2YEEk$?G0dap1Yio6zNNUwH=7)OPN4Cx7pHmrFrU%GcZD^5_%mHBU-NKxxFIvym7W@7^Gv0T`!g;j z#)I`wQc!)cx}T9;JvqRY7z6fZpujjhZXHlykc-G7r7q*PY}HeTK0ia~wIU~-?DF4j zt6K(N_5A0#083r%#B-@QrGd-_W}dEGLB6=mj8+E(}^iM)rGOa}@ zt=b(#R}2rP_A#WRzI0X+b6<4un(ZmoUcQ_S0F6GgM8wVl6-wgtOAh<@#7as^Knakx zIa+j*Ywh8_g!~MU5EaOQL`$yvfSuzLTT<^{hSEaY+EYa!N)&0j}4 zKIBC;p+_D$wj3r)$3Gd`xFugEB?wf|k<~AA^*UF-E<&fC{Ga3nX3{2u*(%`)36FuvWP$8foMp6ufVQ!5iaW)XBb9lU$>>U9Q()p+$PTMsi0$KC?L zGW)E69nUYX`tHZ$l2yZ&g*V6D)RG|DS&#wPs%y-~joD`GDzG)U+<(*{0mm6)+*c8Z zur=kfI@~l>%2j_6Ml*`jWBz5P(To9r zcoCS?SVacYQV>*hWuw2Z2M-w9@2`d?PNl|mH??gH6gc^|vkcd!#+N38oM;_Lad1}) z3!pp3`-36nFNG8s0WWri;jB4NrA`~8HHcK_O=6`MMv+uKxSeE?RI$FI38QJmUgDq5poVix#=+7rY1GTUew>*O}SZZPGgI4q^HyZY-uUUm$b;zx2@u4(_1w?ZsA_9yo%_(EecpV|6!?@FlQ~;QN2k7cXn@6#v$=IeKVbMws>jB`n?xxO52JPAB z2!BGpj`>?fhR`6uY#`03%B}-_Zb*jZ3M%R-L^QOeobT@fXMx@5%)i^@OQ_v)2h^)b zzf2+>7ZXBBIOSM+b8c;^y(Qig<=(w}{PCKo@aGH{Jmq)YjTDW7__PH0BSl`4WIzX- zgJ&e0%N5NyqJfVmYP2iR2eJ z>~5<`j6Ym#@-qe{@D;edt?f5@a}=Y&^!BI{YMt2Dw{uVmbwuEvNfI z+_{tLQbp$(Xk$E_*&PPlX@xedn6v$6>(v3XA9XhlWmDvA1IrUc=xcM9^ zLv(ltVnRB)T%Z^@+((zJoI}teOygr?2_e(Yp3e6KeVWDI$dkUx)cXGRhtl-Ypai=4 z=Ex}!lG&jlHgHIze=2%rp51WRTh=bN0YMZGtQqe@ZeB~pc?s-D_$0)Qi^=x58ZIrj zUSqD|JdKifml3Dct=-+-?_KrVA~=k$y%il5dl?jU6^ZZkeqExkenx{vtn~5ICk~4p 
z3MjxaH`t&b$`i6=kpPdb3VU$=Af|jzEg#3R`@*YXvdHcR;g}7i95hGOj-o%Wc%bjf zVNv{f0)Ye6m`rV@AV<)(NAS8}72Gg-BlGwIG^=E&Qx^f|Tb+SmZ9Nrm6$_^mpB%$D za$Tn2-5TuuV}h}keEa>{fn?L=vH0hWhGHIVI*p2Z+e_`(=+hGZl1^{i+fOBxoVI5< zE-o%SDe0WgU!?gJmX+Oj!H3os_zWBH2%o~(febkjF1;5L6aa6L&~686BTCK2Tc%v9 zK+>D~KJoiNhDN;hv|sYHd-sPPdMf4Y*P(L6;ZKCxLOi=}YP*Rb`r|P_-h1~Mie|8mOK>b?<5DZfY&38&?*r2RTwSmJwjR&B5)b)oriOr>dJ zmxFG>v!F~`yVT_}TMLkBxtLW^BK z&x}+*Q8V*yD2o zDXokKDPj-xDvIRaZE0>TP04Dabwj-j?xqOZp#y(?_6T)%=cm); zG>Ay!;M5ijcG(^LbF)1*CZ-k4$ZoD_uvRKrY(DPMhM6@xxAqAZCMXqHvhg9z1U#$r zE8IZ#G+z)_Fk1q>bo~90AEZ1D9zILu5aVdoidn18^i2H}k6P&*cJtA?g9_^>YV5qi z^EBnk-`^j~_Br|?70E-x=x|$U3}xRLDLrJE`DDsOWV~ ztf<#nu>LCivr8hJEAm1=m)61pR>*Bks<+!GP&JO`9TN$?oQ(y=Qa1x)p}h{*tW@Bf z?7n7}dPU_-wA4MmW0IW#e-vR)?K{&_)Za^c8^U|tdtB~4BG9#rEsLa`eS!4$+t7T> zhf8*L3O4BmSyY9~DH&4s6&T=+tI5uuV+|g?X^3oA#6B?k(fC2NDbhwmG3iMyN5`j) z?FXTNUOWi+9Hjl&TDL6!zW5!&3+$~WP3w2Vp|Ih0_EsJ?U9#bVBBd*itMk}r>E_Lw z2`=S^@juTDt|hL$ER$Q_frPtAXDRGv-qA^ogRt;L_0OiELcSGAzy_TTH=+OUDN-&y zmulllukh)3sVHSZ_|n1q_<+Y@C!*WQW_3;x#+=(xZq5qNN(Z$(Om#>p(YT8Tt*>J)rZV4YI`#Or+S6s ztuM#h0eyF>dTfs(mKh$&0~8!I@VeOs6~uA#pST^MlK-q73HX80Kzcp!TjhWk?SM<_ zEfUcJ6k_Dm5vPDbTN?@y9sPR0EMwK3CF&Mvg~7R)8}_^eB^c@t2I=Iqv>PBiNK+7H z7jsefgo^mjpBdE0dupviLy1*$Exm$}Y>P@z7|LH}&j=tFlMdwI~ zQD{O8ltwAccRt> ztqcI4@v?$~pE<=`fCl*9^Phd1O>UYwkb$w!=dwtWo4aEFhoN|QbAdX%B|Z<4n`?-% zW!zu+%w~^gez*#vU`5X2HSe05bFNmbf<$C_VGqs8r|8JMA9{nE3<5a|bYI2azq#>P zhsFt6c>@Rhjnr!Bbs4$D2;8}o(^7of z5Wt5UuJ()D=;UeX{kAne>I=WWd5@K>!q&k8C5UkNrs+*cZyZZhu{MaU;@Jh3LqrkM zbKC8&|602Z4!V&xm)WUr8#8^6w9%n$V`j7{Erd$tx`6>Uv^>0!k!jEjfC)}88QlRM zI|Z7!z^s9d;)}SksNHg)F>h>Z^X^m|!goaVG6WeLEJkL7O=|@79K6_yL;Z7jsAE=c z=nna0ysaq@vY=ARRz)fgA$ekk%!|yakQe$!K;t0`#a zO7~WH+*8`If|&Hg0AnT4#xJ0BTHU)f-x(bbeKEgxLVaSm>^z~&mW2}=01R3f+3wN) zhD{e&|Lv#X@yzLE0e}LTiaDa7a*8}ULCrq;1I;c{vAjRY{i1`RU#{7oFcsPeLg*C3 zqS^DTz^Y;NS}#2;Ly_&7N;#UsQ+Icu{f!U+%U>rL#_l~cPH0#ZFFkfbN8R2?hgdCX zmjtcn_TJ88J?{{O40RBXnp=TwU%YtnR5t+L1tJILzO?oqK`Ch8 
zC&L%V9+KHau{iV0JKZ7VMRxdOG;`6N?wF33x-bK{JMYHlH+Q){>7cH#f6=^Mb{q5K z?VH*&VVaV=WK&q5#PUMIhS{>Te2gm+zZ;f^mWM=!MJ85|>S>4WZ;iocnOxb?Dr+RP=V?foLD1uIv$!1;cPv zNzPAuR76`qA%iEQ;oz!(qMdaJ$>{j_S}S>+(!ezN{_wVrrKP2us%n(;c!~1cSv?2) zfnJ|jEy^8U^AepUB^!qvuPNfoz*3XRbWe=U-nG2_9(J^kCC-fjW3^U3nr=B>XE)FX zW9^1yL#gDVn_bYR-ea}4ww_q5Jl)^mk&(7rPL_-w#y&cZ8FzKLoP08FOB*RoxqQCw zS_JnYc2KaJT*v?(qaE%8osycm3b|&GS!w8nbn}w+Y=cEgvifjUQWN#gvUbj?VJP?u z>qCt=jxkgHeY-tTp7D$#_*DmOQF0KjvWuHjR=(t4Az4pF%!y+Ec2}oj>|uXf|#}lPM+GQ4xJZMHJ?Zy z@sf(dJv_7L+z9)D|LQ!?$ywMqk3jb%Ij=M~_t%5se_*$}R+kQ?dMP{>Z^SL#tZKt{ zqor?pt1a}U-yQTGu$D}7S{#_iak(Q>{^WOk!LjQRt!g#D-L!`C1*+%jKt^4O!jlO)Q^)tHi@ z&bzVK;TESXCl~ZlUxHEV9fuR0(v};8YLc*TZ^8$2@gUdHD)FMF>v}$Vb-Wiwwp%WE z49k*@O-&9v;L!H;DavnPWM# zw6xqflbO&asTuk=+{8=3D zaEijlp(d@edPoDP>?kgC&naT9^6uV}HKo;2so23JC-aSwl9C_YOA`mD)8=&pT+f;# z%M4qc#RRs@H>+_ol}a)z?Of#L^sj8s2TaRb!Q9`_A-6K4;@vhw6l**G$s`v$dx-D}6$-+{fXK&3d<*ln_+GRCKvS6}CfJ!wDY% zdC=->mVYvsyOG)+{;(sm*M?_p#fRf^aH)uQusfPPWT!W=7)S{5%d@m_2N{b6H@n_w zF*sppw38ndVP>%uOTt-n3#86s~bG`HjOgBz_NU=AYr%I7!(NMNpV%}mm?S>#_ z5so>6fBOwOs%WPC5}gaHdE&>qsMrntjlx{9P^u{6>Q`zV0le!OTT5meQt?+x&V0g%XV7u=`ty>(UH3 zZIpmbS4Qtj3 z5QVCB+I|)LvO*x)@_vx9QxBgkB!W+dK**TMTwuc=iTJ|t?EDT5IB;ztB4MT4nW|Ts zuOR;A5ps*9{a;=p68aviz?=X35qiCq>Azng5a}$h!r;6A`QT3xC`A9~EBFT$Mv&gW zzLqikMuYkKA6uMrA(|3A3|dG}R+{}h*-3lA)chzg4C{Np*xo|E@Hd$Hq8 zl{(CNjMvaJTOq+SZ_gu@fDufL?N7AtiAJwm6cfHR#Bb=e{gPsOuX)Eq zH}Z-?p5_jA8P%0peV(@lg#UO}Dk!h0$db-O8Qa^HULCHhxO@N;j<5V`mHJCnW66P62`A4f9v5HgX)iDWmiR>mCrsf zCdP=0az(7vlzZkhWQ^~)*tbcy1pe^8*RNvNde!~4HTor0x-tAhHHe|Ff?{GdSl+%) zh?*Q%Pm>ZcZfqkR+imZfy?Num7An7oupX>hnH8aUZzl4#S90TQwVZYr*$*DjEmYQm zo4I~`V|h0+o{E-MQZaKveJEY_p%4*|iEB(mL^>KaUAkd#YK2)=FOOfbNn^n3+^`0# z@lYT2<*;H>GkX7WLH$5wNr39qv{=x<{(hX8SY>AIxzqCaRxQ#s+|G9X=MHj@G0{{> z)vP(WjdS9U$M!B+{1$e}`x*AJ0%PM3d1z5=B6M^m`}$kM9o_rOQzCgaeXQe-DD~$y zXL)?4E+a4=$C--o!MwaWIeB?jz15c~X=w|Un#03RR7^YHem%RBsZxYB8pgrJMNPS4 zRJw%TDr#wK+i9a_@b>K^T!&Xd@bvoN;J`Z|K;GFo=6h1o{EV!K_qQOw>kf`X@{W#` 
zcRbJO>QH!0|M>sYPf!s@lebqhTgJdC?U~Z4t`z25%`EddGn|%t-ym|tFB!)h+HO=rxf8BHBn0-&F+L1eWtCc-Fdma zWa$$AK%K~i`94G;Mx&{051?(RDU14i(+o6Xc?mU=b*h~?1a?0f=n>@^@N`CUXaxp( zVY*OJHCMJ?|3pWsuHdZh9C)Cja{pgFOqqAeTz~iKv4NIcET`Sd#PRFSNG{;Ryp~?) zK?YUxXQF)v|H$*LPWP*z^$qj>^!qvkZEFIgq#A=|XiLUGE|8vM{cTAxbwxEZ8q$SI zVPCv#u%c*N^*8x4Gaez4rRpy_v%}`!*i24Dm%5s%=G^S0qRT4>l%ku*SGlm#|f*J&Wh}BujPW-S3)n>GJADA4fjU0jV8xLXt&HsDT_>B$n`1iZ%hkL zyB?N6LX-rdc=Y!q>?zUJ(tP>c%gIZ*;6C1B?vnB?S4n3#@6Fn(T&ON`m*QPSC8Js7 zb?2Wz5-aq~^F~|NLLG_dPpz(@B6@Dai>+}FE#t#D!X3A)G7`i6s0GJ?C5sZ{(_|2c z?GGi)j2)__LxU%6A7d@7en)FaYYWSYhW+phMD_pK=XNr7_b$gZ zA=A3hsHmt7hq~Ge%lGNID4(VVs`K;>dw!G+CS3549)m&o87bR!a30+aSj=4|n2+AL zgbsakHuHMjsrMyDcn1euW3@UyZ@Zi3E2Vs}Xb1O*4~+A2gVc@dU%l)r&0oLt~F_sO-+3z6uug&*PC~h>{s}<)y=}K2b8TUEAyL<1Td0sHRn<$I=QV32m>az7u=D9_ISgL=)3`oP4OH=0+-#c_Z5 zw{O~&G!rx8ocL7 z?O%rdKSY$S>M6BPUdFw%mr#?D*lf?%?*6oNrl?i za>xTzy#9Jl``*}@Vj)>VG(pq7&?G)wVm>5G8dLtZ_h4dT($a;OV>Dg&)6YyD8Yx^{ zT&O%J)SGFcW?St$Z{*gh(p~vZ&^U2F)#`+FE5lCOR zBylu;v!NjPZNo_FO(Gwu($PPC8SmXT6jWe}uNBMXrgZBT`?Pp-TD+pFY9+X&Xj+w2 z*YoRQY&eCa(?lGWue!N+UnB-RGsc|u#K3EH`z_<~c~e%EXqN}P-z8I^gmAGP=^@KR|$p&S@I0q zsPe5R!C9-ut0CP*JauOKN;VtYK~U~PY9Mpt&PDFtC26g0vO&7h;CCDU%D>&CXq(AH zR)a-nAz`8}whG-K6NY8EUqb)8VXGVcL}44s$SNLKbfIIjROvpwBwemy7Ed87pTk%x22*m5IzSWwA0!w-+q zVYsutmezLPM68r|UymeJP-7Y(5J$*Z%6+*m?)dwI7whWe6EU$i*TdubyAErz@oZ6y zR+%qe94?Py$Fa~G+nQU|oOYc$79AP~B|{C`;`ZYn;muP)AGaeZo7IUFFJhw{#yJtp zm7^(K4yl3ygT2Y`=DOwvdHC#d*5@ZBPfZ^p+zTJq`Mi7(OataWD@^8;&?8#P)~r$o zz-$l-MannJT*yS|`**39ysgKkV|&4G-%2gncMfKKwS+LdQ4z&zMA`Z+L}&7&Ok>&n zy{hj|Tp>NphJ-M+y0JTK&}lbw!Z@Q)bq2fC!dNJBBK!78E==R%e8hncliR`eE>l3f zXaWjCL5WQ=%)-`I3A|vYT#7dq-D@+~i)K=^eYog>d;vk8KR*QF(9LJh1m~ulM08Ne zjeZXEEuZWgM`3e|lS@6yXHGD($bj7y=hur&H$+#*7BF^=KQ7Vrr1g`n@sGGx zR1SrCi4ex}xk&|CPrF$v2ki-v8IZXE)N<6`;*wjeI1% zMdLT-i1i%%T_$J$lO8p4TLb@HsZDR$cI@b=tiI=!TofeFCBTKY-!mXFypE7D9AQye zgvK<-T&q*=aC(i4PhoVIPlokGE`3>)Lm9vF;6J-8bHpy6HXU~xf^8a2?8f0{R=NPW zCi5l1{-J0MySDX*@vDf0WQ)L?2!w)yua&om&*`QzWScBTd*hYP$3bI!y|NJe`rWXv 
zu8ox#k5Zk8^#wJq?H&9F2PLxwl=RoS zBwAWn$QJ3-K62RmLXvrO1wlpBM3;X}9`l{C*?VS1_g3l;jS7lQ_Y7#>tEoKHFO_IB z8P??_60y5sb7yC;RfCbUf?C*r^UmtMkcnit@j}HSAq8=#I^meb%(?$A+robI zakLD_u*}UZk&4)Py&x@~56##4!fYgaxIaq7Rmt^e)gA>wk%z2~e+WmIK-P+hD_W-~ z4TwN(3?I9Q$7FT&*utnufV^DpT7Byr&D3-$q1xW)UVu4om)t@)xB&M;_X^yQYebzI zMJ2CPG4nMDuhh^Qnd@?TNn*Z9j=n`3`^gy@Mv{*@l5xc|+g>LzG()U&5Jpzu9wEmm zB{u#`k;t!Jz#Y^ETd6eLUmIE`n9r)`6Q<^+t9^N3AZE)*TBK|GNkN7cr{CC$%S$|y zRc>gGR2!C6U?5cLKWJ$p?MKSXnrOET0jw1{lHbSy!4iaRsZ0U3(J z`jtWUD!i_mjZug`Q*CnaRd7E!BlG#?O_?`B5rywvqaWx?Y{auk-=QrP1CJbng#-pd zrOG^FcQnje8+@XxqI2$|ArL=|uKAghJaCm|aYdb`*bs!-MGSScmRD4Tz81QtAVF@| zxZK2%ac*x8-_aPx08@|>T=n+R1A?$6i(=;j!?^Z#Jo!^{V!k{}n+b5F-T*N0d+4-k zydqUBeFR^iv{O~CyK?0DSqnV0`t7KIz=&Xh6-F}k;qRM}lEEs&DtSqjEo{oOJE?!v zrUPq^77{7F?~cAH63rVhqA{;42zN=OeJJcqWN`r^NyskH^{sCtg-^tz$$8uCjNXPS z7$YmXEgVgj6Oq|AKb+Yz(3LKjV3%v^CoxW8EMc0WT05f9tQ=M|2WD19q8@F@XPd&a zFYbep&CC1oz(_V7jR_=^8xmu*6=lb>dOp`FD1M2G5dUkK7Gat+vt}eA#Xm1l z06G^@0DOv&&s9P`RZ8L4Ld?>eU47aCC?oL(%+rDIMPi?-HTo3}4gwE35$@g@31||& zrpRi(FxixK4l^W#x90kCUkWtTkLgajxlv<4Y<3bnSl(I=&eRy!s7JlRYIVRO^zyUe zp9F0@Ci~COg-s!9G@csEX}z3$>mh1rP|$Y^9rD%n3LF3{OBzq5z1iDZJ4;PHcCx5~ zLn*hDt-j@P$R?cQn43n16pcx!Xp8L_PV~yB1;}b#58HmXNkz;@A3NeAOsi$|6s8KM zeX%=g*68bXMA%b6z1~uFiTv&LUD?i=1w%pD4 zjMZ?*o@ej(GrS(RFHka5QLVoGfE@MC*C8k>60&u8uP=z!dtUY|7od3a;3tg~F*=@P zNE!7dXI%L6X823n$h}%7+8B#8%>Cbg`)|9MN^=yJZF2`_lCZ4E_;;C4T- zgGfvv#<=30d5hV#&WNW-*r4}|_6H+KLSi`yb0CVo>!#=O;h;juaP%Ng5DU3+jdY8N z_|wk(k(S8*N!H~4;II{Zpuk|>Fu&V{gF?^%peP%c=gPa4U%!5>d_VIsip?;S@6Nh% zk`tccVz%Mpg7(Y}7EAjCIBWO2uP?e!fVScfTbr95XGA@JyQDGh(ZW@_GNq1@w?bUx z{jQKSW;?owGDAN8+mY)lMyUFb_p7ky5NgQnCbNZ$(@#%K+9m6}igm8Y;30i^MBvR& zZ8UqsHH-0CN;zbJm;(Sp(d-CW@rk}b{;|dR35G)R933HVP=y&R{W2qK>1UEh?_>14 zE+pofUDK5>^0I)R^%T6elR^;{=CX)YU0|-d4S8P>LYu;opd<%jRM;Mn~?WG zVgTPUzp&2e`tVJJ9$^kDqWvlqQAGQONl0pBf3t^sI7=%paj_$cUR1Txxd4eIw7IX4IEk>l-d-ebk&J_B>F9$6i|Zb8@Dom0i)(2MYn8Vu~r> zXf1Zrm8}*0yzI;Us-5Djun8Iyo!j^@yt=9z$wwhC0nMyQ_*EXI! 
zEBQTs3|PdM%IYckEq}bc51*VCPAgH)Z>){&uZuGL1I*R$ic9_Sg#m+zMMCg;Kueee zDx$nKh%42qy~EbZP9ESO3nDz3T#1V#7uIyQ08RS~QpvO54>1S^)Y#3E=U;CPtkmuI zQOC~Dokuv<%%qih7zyuFsdU<>i7|z}GhRc{4x> zE9!|-_^x+0zD)+xrRcY~sGR!5wT+1qNdUwKq}PU*AzZMU552%=%MA^2&~i$nDbma2;%r6V(ONv54L^3e6yi5VCg z6c@&&G1BmeX*;Pt>let`JwH=r)XgH8BD$f7rme3W_gyq)hfvm!-p zUqxkT?seSX7`K-s+>;p4r326P-Y6St5>X`Tk+~493#iAyz<|0|KR8rYvk3*bUiq3z zjE-(Q=geTJ=-v%)cA!;3P|7_1=>s&dyr`&hq3PVM>9m21)-*j9vaHZyRPX1HQupzP zGLcl~ux0Pmx5?X4j6y+3^7DTEmBIC?MNS(|RXFRWH%`lTj0qh=o`MI8_jHY9^oras z(#A#&nY|1O=*5jv$~jWmUmfaiO@Fw!4`6zxMN-pkG`@t8khQT2<7i~izd>yOdX4=N zccM-XFn%ob_F*Kg00k{}#N+{1RH5{z$o|32il_fY1(bXwnHhF$z<~P&FR)7$SksYN zP5g4kcw;$p=D@+hndvmlUZOpx%4%qt7x%|SY|T}bRuq{~@W%5>yPa((|N3?J-Jb-+ z#wqPU3_+36zTEzLuLyaP6a^`%kom#7aV?!kkPYAg$QM)#f7nR zw`Yl&PZ7!(g_O2VJwH6w-iv8BRz4{Si2>-U9w-)-Keu?}=5NYcnHEUq{9KCJz%6xK zhi~8?$bK-K@kb_g^b&B$ls{iSt)4ha0}`q@eZd0->$Bs#1eriwh>4507f2!~D2J3h zP~LELTRRHaO{s`HB=-x3ho(b?Pp+ouKx71S=A1=1IMQ_b#m*knvdP@94^x-tq{&u? zU7Q9_wBuh$F27!j4V-&~?<71;dm?;bdr)cdObFUPiyGB#xIfVLL6ZdJwNzK~7oGce zpRYVgcggG0fE6D=su+xnGLbZ?9yOMD5a5K%;LHcol-)3LF%w#ny!LxTZY>U*PaYI; zZ{=nkHc5uXYABkH7YJny)6nOX$!Bbc~~s!99giQ>%qA~VnnJFNj1m&AZp>#1)Hs0k#aEGlGKRjU&@kSD%7m{@Vh?S~lY z?aQ4EhBF$<7An>2>gwVc!^HV3+C{CEb~Q9m0B1o$$+VLXIuJ}^;xq^gLLwrq^-2DZ zo{-{W?CfQy?d@3;qMvHGqB>%2$6n$8Nh9j1J$sEpPClGIeFd#Y-9?Rm(a!(Hh-jF-Vrps45I)#YvTFxLNOf0_pOc|ULi$!0#dN|=ZvmE_}mdNg0&baH)kSX6%Mb1YuYl%|ROO^4`>4`2t zKK0dj9Xl9yyJg+t2b>-XW0%8Tpi}q-_S2Kqyj4Z=3&G#Yr1;1JY~;mn?!aQVVE!Jr z-_^J5xTrT|BBbPY$8n4J&h~B)6zbr4RjHaIN|+=C*cTA|vZLH8O;Z+YSf#4C^}83YgLG(nqDew-izfevlWFZy0y7GtHb$_9daw$?V(GzZ+b{73L9~FsefrDfG*c;kPl#)%aP!Q9^zMFY*4~YFQuRKwx zQo#i25I$Jbr)A)fn!&a~@e;>Q%iW#-SOs=WTREMMvluntAOTdEbXe2JKs1OO)?gw+ zZ_YhHq>rFm2y;|&%^x|Le_^H~dG1@cr5CcYTb9j0sutPxWXuUApA`tTv#QRAC&P6_ zVI(;!4n-;azei>`A2__ad$FcX$m94L()`;KO6#LIE^IE|Cwr@TKTECvu$0dm#gy36 z47L5!nqy%;+Ea^Iv*c8x?Yn3;n`fg>&9V7cAn_~u5%x-=7y;AK7Sz|^>*4MVB_`th z*dQ4xn(`uYeu;W=SJu8k04Oz?akh>5^A?>G=OLEZuN*l7sMBGWKgw8FlD30L{+oVL 
z2qrr;dimpBEVskc;ngaH3=T;>0-xSYOX=+;PpCb!k!KhJ}bf7neA7wEVd zje0l!=kiXR?jAynpp^mP{&9Y2k-W^CnS|gGA7Cj)14BjX-S`n*v9q(l2LWz$EzT#k zR7Q==gNkx09YRM;9Y2SDKR`DW-e+X0x1)3|kU1y8CjdueYG11o=5b5EL!sdPKMOBQ zAf->%G==8^DR$!c015Kc>Qzp4i)}E17hH_1YQ0}ynSmj zskoTMcpA^0>1kdCUJyWLLzN}7z8Pr8ssp zdIhN(F{qi>O$x&B?BhjG;7`4Z_cvC|%11t#eIn3@UKn>H)v6KxR0)05XrIm86jOgp zddOBgwb`L_>vJBrzSJy=Of9I9e!NBQR`)i~?Yr)U9Z)jku^S<|MlsQfTsXTlL*U%g z@y`dZ64!b>BoYTyII^e!VGyGz}oL7P=u@f?|b zx;Fm}`D@k2BslymA`y*?+h_TkK~mbwNGBM7#fD=|IrDUo(%XFmQ&PxR%QaM+WW3Zm z`^z>pWkc?}FT^q&6RI{%$>YDIAXic~t96Qg;4Bd;3=X9O(@LWK&LxgUw%fQws4`wj zMjC*j-llvhjn=uZoy?Ao92W4yvp0T8ga+)4c>(2`Y_C(nDT1O47#IHs><<#Dmp>cR;&4Z02?9Y!sVorK_u}4`i#LfehxU zsH4IB^TwvQUOS)Kz?#?^vx!%|pt2Jqxy~DkbH;#&+s#JT3Eum=8_7vPjP)JUR z0&KFy?%nKGY+6m@$D&BQLbY~)L=B%x{6HtuF^H8+MGZ|)LsNF+bhmU_fV6ajw6t`CO1E@3Y^1yETibKaTmSRk zali51JH{R3!VnN)yMNDqe(PCl&biiPt6s0tHWE{6Wo9a+GM^8x50n1~3Ysy~NDPNu zI2s+=+6UO$VR!_Qh!?t00RByiX95u}IAtmiN%!*SGsYuxkCjY$If-^7|AdWPS=n6i zVEz9>FP!!C^Z>n7T;Z^oguL9_5f8n9k3Gn-H6b=Erzih*?iHJf2M#QVm$ejayX3u=Wz!!n&)U2(7sIY zWBUwgAVj1r#*oKhH!WU3ZsZ$K{YTKs6gFFm($N;{qov7Dz6(Hi*TDC-o98JG$%j_Y zER{;i6K5jP0U8AS^jq5Klf!ThD@LH0AS&a>*{9XsgS>NCLo>40KzY9-*5C{lt(BGT zJL;Zk?|DMl_y|E)@K-)p!9cs{LnTEVv0(BCv*E~G+h2k?KjZtC7T_78u4cBdAYX z+2{^0AY=+82%IG4pA-lfXlG7oXL9!3`B5n&ikX&zD^AC)Yv!Swh4Di;u_qnq?iReAw@8HRtO{DHvj_mwa6|5m4pO4BEl1t)& z+|R@zSQNQ7;dh)PF}AJOMabk6CN9p;tR&Tm+}+uvCT})uKlbI239{6> zilC#aePa>2cL%}wNZbUX6uF$We?~zaYC~+LQXzc@)1E|U6MX0r(FhJqh=?7#RY2Jr zSz=%uSg(Yp9MyTf_wbDmrAZ~biQ9f5RbwQXYIJ0<;*6$N@LmDc3Y-DqI+eZ^d0h&w zX>1^L{eq@)3Gwi7FjUK|Tm{D>b?t#@)efR#)&*j!HVTCot zkrCf#c?j;`sX%{z2A$P)?BDv@hkI|{xRLhlou^Ur)QxC1TW#7Z`cDowU*H8=VFR%v z=?4zL56ufSXgYbwnsdY?A*{i6m4#2-DvVJh_gLc}n*Yi6LV}!KgngM)yDi-0spDym zw|kmRHjBHjwKKMImaTk=oi?yW_+L`kB=qP+FC*}}qau4%D$T>B;?7jN)02vvyRbOS zcLLQ0t7ijvcR+)V%K8AcuQ)$bFdo^_SRE>V`VYAz@3zhKzp)Q6lk@*bE_wYL9is`( zn~VUEO^V67&4yLsXcOkajU}G95ZQ<;tG^|rkhbt<3|@y{Si!Biw&FT#CX-+ZWwEl_ zVmbvY)5?M(T>5a`C=H99kUR@?dHc_~Q2s;DfNX9K`ZZ(wAIZ+-)q7%trg>Kh*YbcR 
zQNHEnFBVW4fcRwHrt%07+vK&{Npa{NYO@{hN|RcfbaRYb%p@ussp#%0FeZ`peO)^2YdlW?>8v zQyDO3X~;JN0$I%JY=loatz&kzFBij9Z+-se!DpPsUo{P)% zysq@fZS#UEITal(jy3{u`EadPe12vo^Pb4vBHQDx%ej-4itg&tqTR@jsEcw21_l5z zb(93$;T8qWY_rDr!}3%YMhcR7hkOAmhg zGC9xn$>7E$EnP(w6ALR6nB-LdIf&GV7q7x{;v}E`Nxjmo){bYsUPZQY;2qv70U}^^ z#9XD&&ckh-0Ena#?fiN8l_izHkl~j}s}Oz-7RC4W6@t=WcV#!TN=4uk zQ=aYM#_NZ!TZ{F4gRY=U-A%DTzfR8aB;!15L;DZxqGT7S{rP8~SVg79JGN+qj25}* z8+3JbKl@}p3x*!G+(C`iv_LA!kZ&)4l&r=STgk13S!D9^wDzZ#P+Aa_(nwqV38=T( zltaC=8Xc) zx75x-47Ajfn%-EuAD~^}XA3IC=fKz&0mLdA6{gR}zr3`w`13C5B=pLl^fAG6i^|eA z`kjipb6Sw;w&t7vfrR6c6KJNmDk$htn$K_El~eXscN7`}@lLkT7~&=#V=w~obXUL2 zEmO5L-t+pt;=3#RhCfy7`VS8ki(KZh{~NR0QI(is7Gt;cBQbjt zZ4|kcdXkH{U5Q#UQFMy-P*<`J_N;ov`niwafrR>Jsp2>usq6;$4po+j>&Q>IT;i<~ z&6?%;{&=YVT8)W<_pPd|m|zeMLk%w=iqRF#EpJdi}sX-0o z=18gFlR*9v&Ou!w6~a(Pyr=`6qE?F3YL4j1cE6zOn=B=^KE=|9521%|1m^PQu5Tn^RWDbkQOn&e&pXn z%TEPr{wtnJ1EA2wkl#S!BBt_4l!XdayZxcj+FinfPBm#A5%*7+sV1Po9;wnxrpS1n zOf_O{rf!-S=-y{fJ&I#c!-kpg3OWf%nk8X79>c7 z77_f)xO$O=)wIj|LVpyO9h}gOSSyGxUeG>n?`Ui~3y3EHo8p7KV^qVthV2A^cX^liz7urx6`_M zXuADN-@aIfoo3up@N@*`i7wMhqP6vqt z#O!V_f&v2ZuXq@AG~5R8nCb3C#A|C`7$A<2!T(V~jF7pJnwr`Ih;6l?RZ*^bTa`fJ z=0ZZ>+T`kP+k`qB1rbnR_eM?NNT8)%%mmqSXsqdrj_6|DXPwy^YQWRz>FWNp)_$CO zob3C0^>>lcJK-hIHD=3}-?8v>9%vq!4Lm2p&5hWbenwZDUlg{dqhd0doh;_dtP&B@ zN+{BMcx%%&)FV#!nu4tQHPd*8-a--bamRG8quotKXqOC5>Fax8>vm`aFS6vnfRkHh z_8;LMuv)K9&VPejJhe;?@=1;kjQCXOd?i>sFw}Z^_{8@rE)!$~cN3rDKw3zDE#^~O zhbW+uQy_ss5z0BFArf(y{|waaT)IF&we=R}N+kq@Q@}Jl1r&({A(B#5gYi5SgP?O^ z_^mh7g_5VY%)FFFgvw-{|eC2JkG&k&K% za96GhAX!puexndGg^CA_q7hUYt)^#|gwH(0Ur*0KZu|Me7Kx=n*rd`23>lDF;hVBDp@E80OhN($K|^=& zLr~Z)tyL~BMxFDe=hd7&4lV*T*;T+b{_J2@6xDq>^#~0mlIP}zHt*D>;p9Lm;W!yM z4<=TI6-@?mXgW$SAw2Lw-NJK8MPuwM4oU#LM~Y%Vpj0k!U9kLvIZ-J(!_J8?t^3^| zH1xFcOU8WU;zCttDO51S8U!^nWp@>B=DhYvg;(_AsJ8WGNNDUlj0C?D@WIB5y~ZwG z2kN%hRj#|cw=QG4iqMCf^|ge!FV;?W4no&0h2F%o)8QT3rQWRCD}&c$=DO>S3|lX4 zo0OU#E63Xp_B+o~wRPrZ!btn1ev|Ns9$qxWawN`GsNnGrkIV;_Xq0Qc=Jw+1fffV7 
zS;tiX>DZ0JHqIWQkdFFYTaxl!7j{a0#-*AXnLK(?inh=T;?xQhREkhIG`twv{9W`L zY@7!{v7q*A;c-2BQ4@#$XS8r+I=7o1Jc2(BR)z=}ZB%9H^>CeU^zJxa-a6wzdqQ;0 zASg&dh*sKp<|Zm_ZyKjliFbG$n30L;<3-Dw#c)r#QTW>Em5jURT9ELCiD)%GRAwau z;U8r~Fok-s4SW+3djAiMF%$G`H8!cr^i4xY&c2I;D9urTnLXB)JkVFZA&#xMD2>$^ zp4{{V!8I@4sK*6*jbv~Y%JPwwSwt7n5He0+o1F;a=>AV`LLmE>2P2h^0nbmMy`O{j z;eOIg=#=@CCKW@G%vu;xR()zl%%Sb4;>iCwB_#_k)5UBuKecGWr{S7hqSRec=yHTA zSYC#gN-is9CiWdiJyEfVcI3=#frUE?{+y2*!_hCG+JqH|ROJctDxU$7-(mc3!FpRE zqjMMp@8Q><%KUgr1+A5cQ0)Jz?|MJNHzBn;QarfA{ix!1`$zAtT={H4?%om0AsIXe z?K@al{TwfU#2EL?Y54y}m=i)RO>^|(MMe#^cq6TZF{~xxGmXS*WC2y|GdAj^P~<+j z+>n68(iZkmw5C_9ziyNZ!%~d~RPH{3 zzRasa@S@NZon+R9?nQtyTjtgOrDzM%FiDev+#h+zk10f|w5siusZIBgRy}cK9&;&`2QR`EJV(W1l;Oj@2ZmdIs5a!)9p-L zRH$eAP)ZqB_L`UkE%ui2bZD_{Z;s6%L>M;3H>{M^N^DJLK5<6wjydk2D_EVRY32CU z+Db>r^cXgBIhZ`l*ywjeP%x_3D1mt1I(z%GGztGwudvX>UI+!WlZD^mrXA%`%%#dy z=uq*PaR1LzD;;kgwKYI_J2&v1wstN8aH9@9Z2fJ+zg=VMn-2fXABRF%X44F%~<$ z1?L9qHM5y?kSO$(~AT0e^Z)G;gOhv3UQdu|#xY0~^Yf;a}f^^?$NT{PhiO*@(5(`29P7{o;_{ z8pHqa_b~FrZ;Rag`Fvwr;Qb5HRek>AINLt+0l86SkyR>Y9{=adpU1P)O9}%_7prf5J`EOGHTdJJdi?tVrb4TRyvE|AGQ1-JBR!mb0U5riho+(V=}7)K zGsUhjex#Gfm}@y>H9OfZgdnc*^FRKhKmXUQyFVTj#g8yK_PMnr@4?TbzS71Z7AZ-z zDqT`1)(O>KS*Jn}oA|;en#lOAvU^4y>--)36 zb<>XI55>e2CW0Pa^n9MS6LaZ`z}&tg`%ne+^T0s?E?V)g7FVv1lJgY>K2P?dR8auy zG@=JD2@kiB3rGITCmXX|C&!bNb;o%04!!w8_IzXf4`2?~`SBh-%vY2vc}rWbrY>Hs z;Yl`r6wIh6Z0ODhu=`bvIs5^Eqt1W*Q$Q(y$mWAl;obdjC#{2X%(uC?}O4Um&(co2MjQ-r&^A*phy7wG(D@kV-NQ_B&JzO$ zJ~TfDirRPJp?MMY&L zyXjBG>=PRz^$Pd)Fq3oEPm@0!9`*GUXq4Dp?6n2)>!YeoPD@L2GJdCeFd0vvYw8=i z+`Af09F-{@#Ww$0?$}Jf^T@yJ%FgY(ci-BP+ZSbKM_9E-Wv=sDaLv7=>WJDI4cu87 z5Bh2NvwxNyUYX&MT(SA3omKu1@>z=Y&ebaY#C1(#}xdCD2@b+qQSusE2zO>v|Q zmCltNaZo+iv1D$WT$gT*aXnV9Gzz|6twX|Rbx4B*0^|41s( zNVD?|3*~X`OSSLS$lu_v+1N;f>wTj@mtTg>M5L- zhn^f_pcy&mzCAor01r$s=&;_4#ok{V*8^LF7kLU8B2AbNN8j*+8GMs|FZ!uowZtlZTMrK&ICU&9aZ_(^?UR4DHC?%lh@>YgnZ zewR4}th4LV?uA2)`bs?HCay?J1T#5Av9iiaxwVec=Fk>KeN}cd?M#Lna=$ig2wy9U zX!;-fmbMO#oLFssy|6hsjr{mOHmvmF(_ryGHmrC##o-?t)($o-zSn{)g0`ji9{Z!f 
z3$xXGQ(6e~T-J-bo#UgXlldO5=zEsy6Qn%0KO0}W)e*AWU4r0TWZGN5!Of*;|Mu-n z1Rmi5f=dzmHgfOPn>X0A)#N)gzuVh)g??7?DM-ofh%V7y9SPf6;eRK*?)vrT^U0n{ z3j>6(8^?W7y=sPEOIK_-;JHh3KJ)Dz0uj!j+8nHhfwrwPsP-#r+HCb%m6%O7fBdF- z!E%2>UB4s#%Ez%SS+P=zwZ@x!EEPsghp zQ;!gfJ;SqW$Zx|o0{4xM0;c)p%(QbE1kaCYHSdTP|L5wBf;*alC^rO2x|Z}RZT@-u zW!lmd%y|OQYxCHnGWhw7^TBg+0{Nu#VWFvGTU$;U>kQi)T8PfPvU=wZju4VJfz$_! zz0{E$2B4`;FLOP{XE%_X&)LGmEDiOgXQBDsVBtWoysV7)ij&OC-j%OVrs>53-SXF9(hf5*y@F<;!k0rc>u2BROQjMxfwT zEXVrlWBYQOeLHyE;Sna*=g))qoNoFp&sqp0)YQj61Ozk+eqGO4l`IyOZNu>v{}l&Y zk}*SG!F>w{3!@&PV|b`sc6v4(9`plp=CIGiTV>pWQe@7)T-UoXD$67)FpoR>Ttig3 zm616QqN3k=2Z@7|eq$Sb<#h3&27QY^$>8hA(U^t_jcd2*c&>dY-+l3z%Ums4se5py zS|QueDp!!HZ69gtLqMtct$y3K0zNMx3@-n#M0?mivXtufz(b65TLqw8E;*jvb!ggH2N zSPgP(V3hjHy;!5FeK*pH^G{?L`0aNX82l>j6Wly4R7`kv%*Xdq`(CT^Dr()`m>P61Mq00k@25sFC9J+jCGNf{UX#wb4X$RyKU^(z$E5QLw zIZspjAvILcR7knLBvd?D`mo+ER8cnGx~*+7Rf7Q+yxoD+SsX7EjN)gXRnMS<$)|er z2>^O)WK$%IxcSS7D?j-8eX65Yx3aZO z?iV2sp$(yrDK$mB>`!Rh$bI2K+;2QPy(YRIf65K#Yz>zMU+Ix*o0>Y8xrzu3W28~~ z!UD5SicChmVWy}h7sK#u8}emTO;CHHQfeyFi>(?g;Ocb7?H-<3cZ`dJ11aG}u*T6k zdpn%1TnU@^%XMOw*s=0$%esA@$@7it=V$gUGU4{AnXgPI@wm9`T{`2DF;>YAJ3qVG z^9mUwvzDn>d7c*vtH;K&foF_BT36)z+FC)xy;?$XOG}p78WOYT%FJ?=4Q%r1T}N0~ zrkWC(8jd9ju?3T~>ZpoLCNKPTx?~E#U`?V;f(t#ia zl$6xY@s16h!UnQC`L^Baa7(?YAhANaam{0o(yGOw8hZ7LQueAns~D`ltWgBI*DF&f zXEJ)JF0f{$*B%~;2rN&&J7;Pt@-63NRR~w2R^?>Q)!?jttWHE_mOv1u6)ZwTwFr?k z9TRm9Wwf=0YyC%T@HL~UO%Y(0^?d#x;Q``lcwa{_=0&5IziO>2K8+Oe9#qkz1eTSM z_{-k=!=}!kL7ib+R?t)Q---eF3kB2P)dNER&zBhgUwlY!7(nY+bXEEz6vH^u(0%bd}GM zz4*FyqMt@}PqK~^yxHbvOX8kIbP7BTq@*;;m$)Schl+W^#q@bbE zZjCPi{UeAQ*=JQS_n|9MBFdUp)=njyU4%&(lcb|nZ`rYOt+MOba_%y#>k?nx)4Bni zcxrIld>qlqFQ=nCJBg^r`V9{{yX)Cxr>ymiu#EcqG{q>m+(@s9&6* z)AWYpi&!I1IT?#mEVQ6PO~<%DyQy^YM0eZTtOL#m1;){+F=SjR$yV=vNJw#l^oVWJ zWO_nxzAM*}E6)REJ#WWWmf)N4hfpsv9R>(1g=%R?{mr*pT3AHmaPuMtp!ievo6T0S%=Jdesnv@0t-1s6zMO5WPc{gpf&G%^WD^B5 zDu{Cf*&`BAL_xyi&0UD!oguWJUQ%r}OAKWfvZs~+4fXoqcWQp?Yq7x`Ei&uv{_=xz 
zuNF*Js1_q13kewxm2tvgU)CyoBBDp7Os<%k=;-KjCG8|&qQjdZ@1|u4FJ)MDR=(|M z+CstlT@T;q!<9=m!=*~;PrL^mITX#o8yG4qcmDOdAcFcO4*CcCfzm#qaT|Q7(Yjc- zh9d4B7o_6DIjhyHnSJccfdKn4U6`CfTQm9Pi_G`$k72+g6$3-xhEuAKJdNxFqr@q_ zndp`{&vj1Da{s_CqqB*RRRY&AuCwnvQc_YXXvNtM)uNzB?ouX*bKTA0JrH{cb#%DPos* zOIT1b)!G&$2=$r$^fT4*9OC$AN(a2ZYG3=a@8!Cr_rAgaU@NQ6C!5-NL>i;zCL|nL zgJO2WUA}eO0tj&qw{;=n`-XldX%gU=n9w5UM)lx9BotjUN z!<(=9%C}k9+y%P}Y}j=Qn!0|?|r8Q*B^hu&mt3ihl@XIFj6&` z#lOBoxE(phYH=tw`VkpT=3?N{D{L9gQ_nc5iY`Yc2wF>~Pe7ax5I^ z%ho{W;^HbXpR8Zu?!AhQE&l3Ubd}_*;UPS!`i2INu{`9o*2~-XL|i+oCL_8F?M2KF zG|;E2uSkA|w&RQF`*$qjPh7@IF2bEyH&1`gW7T|M5-h}zsy4xs&cy^BN z*=+ejvSQX_e-BocqG1EyfF*yufMo+L_9JdZP3! z+4^{8>GiBj3+FYlaH`wP%&`iXZruQ9s98J%1H1K!&Xr03QUkcHz5RSrl)7ge^xD2H zakO2%jxh^Q5&^Db0`9rWST76K)1~6;CP@aiI!CWV+NA@-^uQXgYo&}uUQ-7!9|>Rm z99pGMEa$O;O=asfGo5jxCoFDv0Ts~g7zumWuIVg!qIZS7*bQbd{f^2v;zrhg$HQw$ zYS>=v%ct&8<-A`(j%_cUCLPxXXpd2ECO!<6j6+Y;NEX1UD&;dITuz^4OnjuFW z$-2xHVrIU`&0wdItyUbmPwa+*SE5)poq6n6q+qu`aGxfxA>s;t{Tgwbs-DBKmIypc z=Y}?gBoerZ(7kp%8O3fXeDvLVlT`_ z#{?{ep_^N4?bh=GT_d*`Hnb0ml@Qo_KDUI#ve@t8)DW}BZWDA)&&-HLNb%EX*4%~e z#n}VzWY5*$=m=}1OMlX)Np|i_R}C>!f$PK z0l*+dr2$1oyH|@%swI^i2*tzC9(nDJ8x9}27S7i7DT%KT-@+k2>LWy1!gX(dq`kaN zkYv+jUZ!uY)6~fbPNrcWng%nVhLTI)8=Y$pf&`e0WjbQ>$rE7#vcg9US`FX z!fsm6SP*^aZkBSi&J6OooGoX#E!y|*bvn7zfdph1PwWqIzy^}idGFR()qIR0Kfox9 z;Be@14L?yowz5*UGq#@v8Pdyn6}luL90ULSX_n_ZwXj`+>2A$nw%F8buN5dWd=tU%Khl)STQ3Q{uw;1 zO%!fZEwsbyO_SC>f5Y#IOZ_nD`_rL9djd(-dY9gT5=r#Em0{5sJIN@11LA}|S8Lve zcz%JpVQySkfz{SPL3i=$igno>V}FNZ$8G;o@WW-+47l|cD(w3Z)P3-(i1x<~J^cwE zg6VxZYv9w=C4XdhKkT!(NFldFx-J$sav##qaCFN4Zgxvn39Cte{qM}P+>T2RP#Fwh;#J#rLQ4aVF&0gMiQt2) z+Z7+e6v*t;IKmF$-tWYi29!dO2(C|ub@-a|kFx6wSh)1$vSS6h9~fk=qqRn4H4j+` zZ!Oo88}3buz@Efm;b&^^=eh&qCZf5_8nTDwj9|yZkWG2ia1P~lQem~HzNzbnu0FPFmxuYgu+QdO&ydMg3Lo?9Q3GnBzM z(i8LCk6Gk;;&;^v;e_jk*%rYgOpTqmR?0l_!6W;~rJkkaz^j-p&(d?v4qewI1k6WS zC$Mw#3)!8-Wo=v|ppt{9&FS|paP$?6-@CFdeZOPP&e*&QmJ4f)oaP!$tGuN{MTZuM zMpv$AerJ0&SSL)vKKf28J|gL~MyIja)gR>?5qT#f=WaqX{Vm@n;-71}y#wD0cKuWR 
z=P{~ISSY%L*M9v+q76NG3^qj6iLjd;QSjIqy{oRi=uhm1fX+c+JGPp>m%m!?j~9Wt z{9n#lpd(l~P=nNQ;0{G? zt=rW?lpu%SpJ=guHZ(fgTfIQdpuX267sx0frcv{DI&tJgIdQdE>d?s^})Pg zR@F^ja$Z}rIv9#C5#S9lHb*}hM?p@_&y0&Dv%N0 zNOT)pzqU>$(w8S=rlz&|;PO$2WF#T$vZ{5kIcdA0DQJ70oX_(8^yw;ZiS=%)9>AgB z`?C~fce!sXSQkc9ayKfGQz{tfC=*O|^|wKyr2|k8RM0F{WTbc=>O8#3wtOyg50fyG z`1sl6z}^m3K48yM%TDKrDHKa;gE_It*@Nw~qus3cUS2^!^&}@a1`Y3YvR%%q78?%q z3HsLGowO=M{@&`Ivqw{Pyb; z(`)0rsMx5XF+5E%*Bl=LM%XGZ^vk&>?@~O749g0?z)GK;`68FhA_n$vfyFr)0>*K) zlG{WCF~%rX$sZxX#V1}qD}tzojGYsiG7oMnmK*m9bUWW}(|5v|B->EQ18Y)0If?D= zF6fE1+%(=4a*u~68mgMDMNS@h#;L4-d3E@>ZqPU4>|2u=4~ui>&XfHF zLOEBR+>@>&Tfs_0Vdk_o)+IC4Z@bs5nQL`Dc(rB|@dc|}0Y{Rhv+l#wPed%Ih#9+V|CWe>wBTZfI!bNiH^$B&z zg5Lue1YSNg-LsOPHO7w^RM;kECW#8&biN1HvO2ah3y*HMM}>H9{&en*fVpzGga@y4 zMme{P65f=RJHum;Cc6T*-u#0KE~(x%9pcFIM%0;fk;S; zV3z9*oLluh^B6P59&(L}&kPK@`j^k4cQ99Y`J>Y0!9lE;LM5S2i^DZ6EHUfJn5?$s zsoTIbNxpb-6ROoT@lxtZH8`sA;52bO*w9MSi(aZE8Pv(WE|+%J3jQ%DARWl7MiwmC ztp+>j3J9=a%s!=0v(P!WkQm$4HO>~at#WVY9BX~R!$Z8XLddy8fA{WpqVqZ>gfB8C zb%6yKH<-OC^5O*r%rQ+h4AKT>=lWN$&}@n3Ao+c}qY0|Y>rLFYM|liKvt168*$sM2 zE*6=cX~?&M9?J9f+wa-@{&>%?0r#xOM8ZT=;cV@_sDy9^V6I>9 zqNpEf2Q&<4&uFE%>)W?CvD}(qC+7veXCJ|ErSUknd^4Kffa&C99WX`hy#uh{dv>@L zW;i*0@8tA!xYQk6Pb{LYt*z~gq}Uv#j%~H`et^7V%b1JUVpZsu6N8`#?FF4DPu>r+ zK2LTs1KY~xu&i^mFRz9lJMFF**pWlKg57l~8rs3RGRQ;QEzeGXU3A)CX#&~M#wK^e zmm4tMP320LbwwSrQ^O_v%%vciMj7n4xU z4zBRAQVU&4PjB4wk@w9|EJXt+9%o}`h-37CcN`oZDpyZJ2?|KOFzS}Zu?Pqx=4a@( z)@g8CJG$PxFx+Q}^&w1FxnpQdDm!pyV7QOZw`1$-dKK=*6xvhYs>beQFUCxp?6}I( z8#f7?u?0`T9$M_jd0`^vx%O?SqD-tt$13fqy9@Fr%Y=6`H_0nsZ6hVE=bieKY$kjg z(|q-Uam*2HWjwNb$=djrV-wsD!>_D!&dhSIZo;_4r@jj$)$=lH&x3AQ)%Z}cb=r_7 zCnx9M)8_~ekrRF_#ONHJo^F7@Hdal#Ud(Gh5PuC_lYyqng_N|fp1k*J zIzFnyR{4=_L=R`8C!DO^-Q7dH52lbvaKU=uW)-hY;4J>&!`0o6m=X#e;**op>v@%x z&KrlHm}ge&CeK?$A55R&>SS89hmn)rs6DAZw(ADMvOu>549+J&G#v?Ct%H*;m3z$v zKq4)ugME4>B3dL$O?oE7}V9S1w($ggMG!vedFUE7Bayje(BUc=E8V zZk&DMq|EBnDQTV>h9yhBoVr*ySZY4;NvWIq5si6*vy(o`jdN4LeSeL#K)=Ov^3+*) 
z^xN1_G~23T^nLTr6z{?^r(HJpT#ZhkJh{PY6eeU(71q216f(X}1pA9+H{WBSgtIit z{2EcI%1G@NhXUPf5}j-&a9~M(x;@~sy}J*#jQM)Y!e<(_o6Emt2or zgMg9;)D7vnOSt8&A+cXUgPL#R!-wDMikmw-0|tE}!1p#%z0#gA{+_8QDns%O2w)2( zA-(=QX!DpayfTKmKdV3G(c1-Yr!r@%BSoRKPN;}uL(5-^j?d&qx($SC#moG8^P}kz zqTa=6>-Z#5(SZs67Fo9X>CeysSeLhRH@n1l0oPY& z=$Hx(+F)th3ihg`zA>$}*;xzP+eXz(A8{O8l+-2F3BEniU}H_d8FkkV4vD}CKWEsI z9Xm8MmvAU^Zfp~*sR>u_0*%EAaqP$9g~S*x3&Jzk)gyU6KJ6oRk-gZRb*`J(_SoQc z`L6oiMKtVqKd&zjW(Dux$3#azez4A8F6H_D{mb(MemEps;C|1~UtL}n*Y9HRHn+$P z$OmxOU*~A2J(7EP>2#v}Sl3jEx2Z$)`oe1^+OU4V4l^6ul2`E!Fg8%y@67mCl$u&3 z=e^DRpvW>`^S#x{x;H}3Q2BR!QEKd2_r<}*x%Jo+lW3+IpO_fFkisXQ^sR@xShfeW z$V1fz=m4qitZ}@?TCHT;bH4|83`Mn&Q=OfZ6p=)zwwlb#+eGX&+6O#zY&c#!(ybkl zo1*BGweAtw$^{L_E(6&ba00Qq#DTAUT`ego`_%N>w^iOv_b(wg9k=FE0j%0t8;t^H zPYb#R%FKGJkM}Jtp9NM{dQB(AkJF>Oow<2p^c5|JG>1@=3kjYFOCRsv=Hv4_cH55> zu(7T(7*VNHnxrT`0sa;$w@0hdwvMN}Y^Gy3Z7Mr~M%!3-kN7!WYEuw^ds|h3d1YqI z=f3K|O00m54?F}aRm;9r)aVKwy8Er>O0Mzj3t0eEZft0Mijt1j&s>*e0N=$Z?oF* z9e1sCvdF$Pw}YX3m2b|pgTxMLBpJE~2Ht$rIyJw%KIqyoyQqpiGxFm%9C}j=sliTB zt(tgS$d9CEkciv(H^EaD4`^QcVs@N%)5yl=FHc^=?SyZ(Y-_GFBBem0e;D89 z4&z_98gQ=ydW3)?&&d7y>W@1QNLe{gR%s$C0r<*!fQX7UVUM&vWztQ5vSu9NcfL3H zq?DaYsdB7=vfJ1rq;{@x(;UK+gfW!PS5A&PN3YC}$ ziH`KQmoZdb)G}z98}!U24h|;+Biya$ns?`vB>k{$=7}q>Q0uNb(tz!hl6Ze3*?ZGOI9kn;|!x zym%haMbnXjZ!@!?dP#?Z9U|#Q$kW>A-6`S`ovqOg3o|p?WwZL(im~brp;N_u-#K%( zhPpiruDZS)rc-pVUgop8MnKm%r)khIhQ#_(SZ6s}&Rc3B3n&h7@sBbF+hm3-D`5I$ zh4tz+$899L%c}D>4*)J{!n-_kt%r2$TyAVi*rWol9+>#P9-u zS9geKP(?3KsafL!@Mav8J9cX$!T`|0y;L_o{%*bT^s&EjqXlF14=`mh+|!~5*~Bxb zrvp|i3qXKS=iRK5b~JHQZuYzJyb*NT-$sZ&dGsRs$kW4X`e)?y=HA+7{Pn9}Yz7s4 z@M(RZG%y{lY1ufMPDVHEuDagfTP<8ub=j~xySTx2eI%6&=(`}G)Rt<;U;D=lP*#is z+B~zW?*G`s2l1my+2rk8GHVcKg{;0bHsu}~Ww#k252ZathH$!8$(CCsV;^cC5VGl7 zvhm^aP*4obI4O@ZAven^AUcc#tpGxLE;bSlMj!EWqzj?M)wicpIs<47wFAm%Qz5 z=EcA}Xs^7S>QLSx+{J9ym___WRWmaD(5NzM<<<^(+zg;Z2`y zjEORv?_&X_Wu_j+Po`Pzv zO48Jn%V^9GSSqymPVDf3?u8X1yVVI{Fy_W_og8yRn{X+t>gEt4LgS3`VOItEEj*kLmrofp>X*4h%hE{P&Nl5?(V)Iw*Q`#W2LusD-Y|T$-j|Oxg 
z1x?>nIc`~PBSR_)Q^gIWLBnCaI)aCr;ADlW^ah?>@x{&`?dSS1x`fq<92Hag^&z*fOd51 z=g;Ob$3hx8Dt&~=lP4_5WoE;3QGyD)wFdB^9;$S=uiAozd;+@5#r|12aF=}Ppr6SK zy2$#`O1i9Yi%;raNa<>KP*Ol*ha5~uzd=UU2_QY7l`|k+=*iH*Mz*WH&&s0TK8w_G za6@^g4-MbALDVvI?%I-vn`X81^A2(@jZZVPIk5* zz%oBw8*@ZaeV}n=Kbeotuw&sU0ojv3Al%#i%@$>@3!EWT=a;^M4G_)8+TYpJI;rK- zBv!_%{UucgVshlv20-eu{XR9AcK}xfNG^~xxA#0X`7Q7RC@dW}&CQ5!5E<=+n<0;w z`1smqtX^h-JhZ#;I9(!+&kGdF{JS+7`DD&j;}tt;P-g>N>i=BF^8)L_$k5jknAgx6 z?iiAo5STRidX3Y<#s=oHwMygGB#SqZ&2?!;x_2J4ES6q<0&qPP3Ii4=zRK+QR24kD z-35Mqs3;*Cf;_Ppd!{PioA!@<+dC06{$Tqp8DRf?YWQh3t^&t_V-6up(au!Z@0*1Ffvo>8cUp%J8_%m zLYiPiCq;DY&~<`gq-62sL3YNbR`wX=i^+pi?{&V8m~3e-D{LQ5F@KR>__{|Lqx5CN z9)Ztr5SJ(+1h{^w6aVHfU5K>67X zkdHw+5XrWy$kezPGe`40=_Y_-tS+YjhaVgtTk#Xw6T8H(@bOjSkgAt&Ba0W=+hyN% z-#c&0pKVeq(bg(xT^x134NsP>QKJjku5x5<*HYY3ihIUR9tsh{U=Ot){DPv zSPGogA6eX#dmPnc0hRC#>1qMv_TNyzF7NJy-j9#c33WlKWVGXhkTK5sPA3~3i?Szg zgZ1}rdsOj9VacUo-Gq!cl&ir#3*LD(w?sg}J5z0U)J`kxehO~zJ|Gg=YKVBZ>3X8p z!&5w&X1;1q$hT9d8+KeEmNR(O2B5u2R$C^{$!wfrEh8a0Vf8=xvorvS& zp;!iIXVkWQ&H0+5N{O4G@;rs2n0jwKrCd4ZUcMr2jz-*3DAVZ`Fw5XTQTmW`2==@X z7We6Uh4p0sAXzrEM%_9>2DQ&LV5eAYI31AVceun0p{%V_ebf?|<)yjdqU9ew8xL}F z)t#X8gT-c{!$H4E_o_G>=FmYWTgLczO7|b>isv`4KX5&^bzds}`gM+9{7@PCp}1p8 zRnGW|7Qi$4o_EP+jw)BnjfJQW+Oavu6>wrgYWW6Cj6b$Cgn_Pp?w}?4_2%JJB8IDf*Y#fWHmGu#*1)6i?;5t9q zmiEa-NVolS0k5Mna~;`b+wZsZB11tr8OtUyl8Cu&ied){L;hUaY=a;z?HrB5*!}cQ zYBokoXbX>BE)!zJcN4Ca@1v+g_Aaszn<(kGc@6;ky&$!C%a z2$KiAl^^dpOF?WQUf%`SO|K)fW9P64BO-XiAuXj9)HQm(gBMus=Px+qeLIF6ukMRX zV@r#-vAl72p)`8(eAUk)C8~hlbm<=)9*WxhZ6TSAmoHx?HV-*Y63;O^N%~@|xjs_u z52A~WRW8Bl`LK5Y+Oi&&MvfK}Ez^r&B%DV?qgJh>>)g#Jl-Fk9CRo4*LSfO9E-j7P z`}3c>K>x)2E@JnV9Z2gn{fJMhuxC^Mb5*tr;_D#y31{eHEY$*edpQ?6do@JPf6z)|Ww=^x>^uk&<_%1C+cz&vcszKl- z<{Q*;3QA3bR02h>r)V0mxI@nY-)}5cZfD=QK@4pd`J{kaU+4ow#7iNQN}?i8IJ zyfd)oqkS5vg>&OZ&u`Q80`UNgKJs)hRd)1-gb`GxBfB6zW-^M2kGAp5I<>aWj11aHHSd{F=Q zzq~#B`yMpIfAqZl<7ZQdRQ|sA>>oez=NJDw4~f+{VQFXe4T$$wdwX454scEXc$?$) z@$my%C*jlAhascfl@Df-Yu?q>XPm0=Bk{G(%`_JPF~3QuYo`jUJ2pOk6%db&4VD

_vY+I3cI=r0L3R;_HQIM3j(aznw)~5UbtOv-@Xk3U2I(D*phDO7p%!2~Ez`3LQ>6K=R+K zxmDk-<7KpAu2W&Zs^f4DHRFlR83PKJ2Ia){yaj+bT}W>beS;=7Zp)X<;^B-? zA}?k9FZSLuDypno8wCR>2u46aKqX0*ELo)ii%gM=B$9I|augAfAV`rjNX|JHRe(s& zIp-uf=kzVQ+wOkf^S$God++&q_ZU)+g4(tB+H1}E%xBIuADuUck?HBrKyu6*V6wvP z&Xm$@bKj;o0qowiHl5{N9{>lBE|MY&V097E#QXERLOZ0 z;=BDoq3$B_q$-Vn7B)BhROunJ>h@Qptl|iMrtd*ifqzWJ{pu!KHLSSF=2G~9l}(3q z?O-c3Nl_K25?^ns?d+2mC&ml?v}5cI`L4U4CCjUSJB-yIMdMx@L4HLVso#3o6v z(XD|MMLFN1cv)Hq*qoT!V5d@G zayQ^E;})lOFP<-$G_Lmj+!DA-VwbcQ*(*78b{IYvq&>!4_%YHwCcu%(4}NnbAo8VO z%N||HJNGI+8HW(_I&6#bI}Y4;h?IA{@7S5K`{@g!aRX4a^Oa}cn?_2uI*>T=>>gaX z`TW$~XfheUZAH3L8zJx0xC2ad`lBs+d5aJn-rqyVZ{VJz);JM6|hJHyDFo4oU4ZYP2Pm9n0ogtMc!g(eM|ny>_WM+;DAL z@(C}Z{2|EJT=9G_s=r}npI84;lkjL6+5MAZtDu7lr+qe$J@Kje`5OipjjJIheV=GJ zv!MK>W4vX-cnk{#`p~sz{NYFv}V9hvWp_ z+dBz`F&f2){OyfdjhgG#xF^G<#x#C*T8>psKZ3>xqPxFYnY9qK;JPjH6j%`Wh2{DU zd+WqS2>PqHeH@QWAI;~g*SNYh%n>|!KGfEKr>47C(wPr|v^IfpWD8fkj{A5-uGd+4 z#5iKftN-Nla3}3*{%VMm087BZfYlgxDk0%uf1lM{dS}xH_Sci4doG3jX`<#@gkMN% z#y;XUs!Mbw3oDbL6|b-?J~xjQYKbc;QYoJ0_Xt{OgjaE~L=lNqw;n!HZLAUso0`Qw zjBi|oJ)Ed-L5w=my4^;+2=KRr+twkWiUVgTUd5ilrgv@c+FF{o?c#Fhh!&u$% z)L-EjTDg1I@f`Tvh~3f7%Z;j)wi7;ODm01kiN6L=V}J>G-dV7nI0IV2o43%?zQmrUQtF~z zRY|}EjR}MmCjwtOwu`C-drGz1W+|ou^EPb3S7W_TN}@zL@2BbdKta;A1?(18EwZBq ze7(dz`lV13(cK^-HE1~3kowR#yL#2#PZ9zA4yoLQK`2V*j(|JFZ)Aspwb0Ir=2D)o zwI9LIzC}uB*5h}@l!+eQq_6Vr zi`NfPGAq&&xvpJ@l0dAelpT*3RFLK<>~~tl-Tfr`PD#aRPXZdkf%25q1$=al|XkQ>qzPU3CI+YbNe* zq;AYwX|u9}E|U(Gg~dOG5T$zR7?yOs9*Lg3vXrB)rA)ES4en%ggAN#4cIjuyw`vN0 zo)kaZX!5c38@3w`>iKfgRiID)sMgdO8M0K6XKpCc7KjNUF);xDpe#nk#nk-EH+4(DX-hsB(Gcxwb z=To;bb9Y8J~2wj4rn_e+nyO{zF!`VOu%e6$`z(KjRZp827wzLg-mE#4p{#k0twI<4>O=nX*7Z(J zQ0TS2$kGHT*IRi&-{?b6N^7B0yEy1G^f@^AKHwMXn(30Xqg{@0WOIy}0N>x{<#*9F zccy>2Q$%x^qHL{cZ$()Z?Q*xmdOmeB%@c0}HJE?XY}96IrhZ2oG$_W z)v4*}X0W+;8@7dDR4e@osL*au#p<`LE1fEtd+mDtDS5(@H&ccjifh!)bYW2oP5JEk zSIo0dGwTcH!LCbx08eMMeHi9v9O~!S9tp5b#WZa+k!f6XPN2L+G!`x>+;8%x_{ZRN zKFFoY_!`Tflbh?1rD=Rsaj+|#?b%3=!Uq0U 
zSYX;KUODS`Vc(V8=*^?`hB6q-yhwVh92NuNg0X$IjS0I{9@#FX(kwDN;NvBV zA9lXJF4(6=M)J|fwc!Y2ePFbA0x#hd?qM-v>%fEhIakdWZQ7TG; z^S~2NoMok!49^;#%MdG&+Qjr^#G#4G47cBjn@*tasb?pFTU=mDOSM9K-Gh_7+U?eM z#3O0~m5|UlnqVEcV+Bbvjxk4nvX6ZR_ZWgXQd~xymnIzNSm=+gIG1`G!NG7?F0;8_ z08hUaG?3i2FVU6O%WN_eWM(8f*mXB8ky8-$u~%mgj+6HGSBf;1)_A4GrKG~B2nY#` zcheX9fk+bdW&3z*OUpi!qEsMma^AJT6kWVs#EG+Mk(49k4O<*8+@l4J+gfILaB%nf zx`M22ZF`q`hnM?{^0>@K5)Oo7SRkM=WblOArjOHslLDXJ`L|qmXBFG!zV5Ly1EZDF z86G$s&beN3bDl2tcMYpgw@8>l0YMJzERrfiDF=2u8z{7U_Nj!|kxkfne;tt~CJ@`f zqp{vtamPIg8)K{~Q;>1iGTOgPBJ7mCh1p1G)=;*{1NQ-cmmO^2Jg?Xg^7H8S4xaIV z*;glyH@ySg!VgTsqBQ2~h+%<)-dbPE$euTvjQ6DYwuRi9aF_GL!g6$!88jGYuAKG3 zV&ip8N2ke!CCBErW^Z!ZaW+gM@Vk}h%i`0 zQ^4D#$@8G8b@*`^)&$#&R03=w@fayZXyqDjdj(-eft;x4++4SUd4Q_#m@7IqkV?Mc z6}9flwss_2oLZ}oEEBBq734aWK-ZX>XxcFetA(fN(0F(9F}bKJ{zs$sunC-1vgF%p zw`F%d&U{BmGjsTuZzw;A2(g)qkX#(pYvxb?D47wd>`EM4*w@ym5teLW)|p5xRe;pd zT^@qEefctw?@xUafe^Q2EYzi2$=0>B4zJz9*|RfF&==g(+aym=Wx!TfIF#SE<}zG~ zcR?$(KJ)YxAuMVZ6t%85L7no3Xu0&pMAGb}Y|~v=^*!H`5SieH2~V7=Yv8ZSuhzPb zRO)#maH#Sa&iMH<+M>MkVzMkpKj^Zm`P!3YYFP2L&MhpYLuOM`0_=ZwsUnJ}y*E|^ z_>ko-75;G+}|L4eSLM= z;)8ZX_`az>ywTjugdYzoO;=Wji;PEps7XaTWZCD5t*y^mqA4ylkricS6V|FvP03F7 z*w)gR4c4P^b1np(_p{MzWevw=TYHn<>F~5%*zS26@8*iz+GMrzq}Hzej!Et0PfQH% zLKECp6-l=6Yd4sUf9l%Ug(ucL?Hz92xTC#UCB{TT5`qG)>aV&(#3xJNNR*O=&zpuJ z?#-^jIv+zMzU1(qJTUCkRTBDo5`@k{< zG~NHG1*m%r@q!Mlr+}q)cbGGYHI07BbbQ90NIdL2?!b-#?73>>gT1{gIe}o$w+h=k zUFQ5}&a;dt%V&~-yKAY~F#FybZ09WZ@H~#Qei2_mwFaBvligtk8HUW!>i(p0)b@se zy2i0!>gEBh(R#z|J|=EwDT#nyi{3RNDU4T~FA*VE3&N9d={ zF-f{@-yeUHR-QW1u216?$MW`5vfhH*bi)nlY?+iTE1WJsU+6wr*F)9xGjpazNg!4K z#^B<|4mD(vvG4(jE=LGDmqBx7en8IdM)iZ<#5@kyQ#aeCQ5lVOlO*P=d>LYs(y;B8 z-5rnc)-!%0iy{ZBb_t&9I-py_6&gfECc~Q=#&ni<>;n41diCNG{5dh{LwGtFle#PV zthfhfgrJLY%}ktjPk@=b)Or#!Kb}~3fNai>RS4Pq+#*s-^x+GF2SW>OZTuOm zI`IRuXV{sF67G1!{OW-R7iFSfh@BKms_vbO-o_{o;!!^eX?+o$`SHFgqO;Cv?)5cVSoC!@+8TsgTrkjZ!jkg||U;dkIhNHx%YDPZIp8h)K%i zaJicuE*xAB71Ia2OuLZ6uog>aIG!WibHcT7K1GtE($wm~mtt 
zl9tK6)baZ!=nM(gicJw=5Po2A%pOG9?6O>5IL}<|4QpvH(47b~UU2l{gT;?NH^Wm+ z=ERmX-rjs{d%*SPI$;WhL}-1ta6{2nz3nhc-ijDeuo=<=XF}9!xUKOvGtm9!H*u>k zV#@;tm)th)S8C7ZwnoWaZ#>QAks!vNajmVT+ zhswY3j!}x_#$_#e$gFsY@ab6_XJD^eg&&CvbsDs0Rv^hAzR;vRE)9x)g}7yHA*=Q2 zgM3RzZh5h!d4~~@$4>7Tv1n)=nJJiykubXeg7Lo}&*CY?@RwNHqZn!~1Xozoc%_#o zP-WZO+qMnOCpbLF$oc4xLd! zgkLtdkq?MO207c&aWOYAKcj@;k#Imy?A-f_vV?m3!f8$RW0tJ#4`jXfV_V%lh^8!d zG2W97m#i_$e@#j6lBF&oO(17Em%+rgr{@}cR_#36v|>kRG>#<_TRG(-W`BK`Jg57B zz?(ekdU0rcc*q?#jrg^W%4Kg(vSTP*3lI60o76MP=26bw*p!@BLZh?X!yZ0(NSsIk zsi3k$+S2m&VfB2qy~k7dECHHQ)Ai)`fCLLj<1?`q9`5#jMYo5dd1d958lJJ4rWLMl zq&^8YXf~^pjzsZgw2l+~kYrT#<+G6tJ3$)#fU8>L^3lw?jw@6TTu*y6n!71{G4K`r z%4vK_B4;w1I?ue0Gk=dbCz>o>L`==N$vrj8Z&ok|_om{mQ4;y4Rk-w;{CB$GWOlYl zCN>U>B>dRG)_nJP#@Kk}uCEK?7xNN1qgLwMMwEFl8tNZp;YZ)da{evHr;IMaq%3PZJ$Yz&c4T)CYtO;) z{qey@@}^g!_#{PU)7VQ}N4KzB6HjntSTpP1PcO?dNW#-PpvjBXoagnJ))He0oTLYI zC_9Og&5%O;AJ3LP$cB!($}nt3ruw#-C*Zp}XZZvewU@fKltw&2olA%aa8w@hjFrpl z$4@Lzc56VM!1v&pDZ3qvDG&RSNA;G?nxTCxe4!HsWet*%xyLKQp;KL=SL0Wi$~^rz z9PK{e3P-xD@{L6VGCb*hdXXwMuPIUbc9F~m zeo6t*_5#$&NXNVqjHh)SCWe5d^0 zmnaOxBgo`Cj@C%szxb4{1TO=bmI)NOqQh?ydyoEHMK9}B$^?Il<-5>eH)!jga1Cl{ z;Z4D&Fk}FoU0m;1RVsW=lGzaJN1tiUPfycT6Ee-)in+QL9J3>7(idmkfSNs+JS&;T<$X-avMY1p3*25QI?eKpCFeunmP3K--c+pi}-nLTNK9Q=;Et^dQ+!$_*gQl>6{ z7!BUisFbUBNzjt)=K<3otbOL;t@>{t@o-Ci>^S1Bvc1$<=cY4m$61p$%Vnkxbmr2` zn#2!T((>$^t*s@Q?0*cxw;YPj0pD>|gq?Bo_Y^jMb`@|wIW0))4*)dwpL!XHL!-VA zx@9p3W0_11g&7y@f(<9nm2tinxp&Ut}b7>F9*?6H&mg`0siSL%x&sPbe z`p0{C;~CT^+=jJTgeezU@w~g_dHB+Z9{c5@BM{8?Kh?|b`jE0ta;9s6M?9*A5Lx02 zGhY7&BGM&#g}2b%2HD6Fyn!1QbPR%$K-sc?`KRy)@^3~xyn?c=VUvJPVR0csUr54^6s+_Y$GUQ%G{xx~*VUF4h<*eHg zR2oZ*71>b|OUUr$@xab9Z)+3gi&W9bfK`R>^Y){sy>4fm;!-5s{WTuK5ANU3(fVFC z{KbB?OOPE5ngpQNkP;^$`m+JkEM`oHTVCM4&k$eniHb!F9VA znl7rqs~Af)DUk+Ko=J$3~i*WzCk%y{upJ^$_aJ(fB&yT97ld zAe5szn(J2fE!tx%E^Y7F-(AAw3a6u0^VLs8Zz`e^eBY(#VN;khr0GnWoW=RxZh2Ek)8l^m(4ri& zAonfi7Z#o^{jfSaHhD10`+HAj@I|taLs9{ZDbe3lu*y|cdW!ejF-M%vQ`Ho6Q=Y2! 
z6kF|Mm{sB54h@b@it%$w{Pn5#Esej2Q1?cj|6)QgYVge>K!g1DpT?uldDOj2e!q`j zKe(@b|FeJaAMd1d-Dv&wyUW*vk;jZc?cuNEy|HILP{a89$i(~qPw?N5K{}y!Byc>6 zsE?#rCHaYjTz%g^!G>gUo%`y3e(r@ zyTMP#<3pKZDyhth1T|D$vM`h;xc;Ss%%vGQN1HxWKFH|1o|=~0Y# z&#G*wv}?paB}oZSmF!z9o6KzvPEEJX;wn_>Fm4@)FP?Mv%U(on9rKqM&s(5e8*D^n zZaK9%Id|Qc$*|9JPuG{i_A;MH4h(mZB@X|4_J9SYCY$n6kNH;Fo1QjexXp|6d@7^E zd;`U@+@f>R-49jkn>|CU_kpeaP0jrrf|#3L%DBLil9iOS$#CSeU;ef}eqz|r_E?pX zP%4^NKEtRPRwkIt#{Bo3C1`qEE)`63s!YarDz(uTp9Gq?h$BwbCbJers}uOv)AR1r zA^@naA3?G}5eivhuUN38yHS`2t-8EP^9*5z;P$fNTp&&jzMA$^dIK2}gUGgDXdmdF zyq7b(He%QHd0J;~VL3Iz6MMHII*(pRXp*;ss00%u-HjfF*Q<3t!=R-lbRFCM38eCD zf3(E9pN&i)DuIlz4)nYS8VwU>0+V&)@H(5LC8kHT%Uhe2 zp{_Uo8nbi^3)EI_M+{FAh$qwnD=I6E#k+ty!5TLSr+#`s{GpV`eb2e6sb>%p&TXFl zTn#Z$4%aIdUzJv%b56ODYOlH>WY%JGyfjxkrBEKh z(I$8(iZf+j% z=+UG3~QBn${+dBhy zM9eTSGE<^Ld5(f*Zpr;z_+p0{e(#L@EDcmj=R#Nl1}UyzM zT;?O&6;b|KGt0}Fm={jal|lEcf=-rhA#zuBc7v*nMJ)%>b#rnsrB*vS1%fF7jRZmX z6=k*QpI8OcDKmGN4^&8{vfIrr@CSp9K6!^CYv6W5ttv1urm$p6Qe7zzPy|Yx+{%PyWryD&izywQ)UyOR6NhwP&9`5Qqk2^ ze_>0Pxt^S ze<7L5PS42rY>=qrrcNkD3BxZlJzkRNHV;Q#sSYI!I|6Y@_8A4s+~5rP7Yc4Uc75q? zKyD#fi42rc{A;#3=hj~6$<0_${F+6=`KTCld4TkkJ<&4y`Sq?if7D~Y`fu98V#aHg zQ!SN1M)1!=p%91&WhEr!_gOVY1&B*Y1cAMnRaMIj0s>M9n@)k85QOz>#N`hG0Z0`D*ijI>>)r|>eTz7jCLO)>ymPvy+NYP}tX!Uip2#P#hA(6fGU-M$1i_VA! 
z>9V~oR?agwra=6FL64~w`6BU~t$kVc-e4z4I_vDGa1v)X(*%D}1JqsvazF>$?Msh@ zJk9dZR#Mc(`XiwJto!AWpy=?hYSi(zAFqAK(~R;!y`)cH_F9TnGgW(EMVFl?e=V6* zXrTUeUl>!oL%Y3LQqkL$Zuq0wlWr1y^1`CaeHp>xZ$5`pnVs<{bOy#IJMK(&joVKq zM{8^|jRWpROd_ zb|1hNB5l0J&C6w(TaB=kyt⁢>xGpYO5g(oSt-CTURyKJ`;SkVo?IykhvJH{4ws) z_91m+c%Dioa6Ov6$A4X~rOpeR7t%sz6A9sI=+se*yqxhMJ`O!tg6-+q?ZuV)nhe-( zEnB3d!S1veuFYJNHUz4z?E!>2z7btb%iN$m3qeF>FR^PDy4~fY=2X2j*LP)vvaFcz zKOiMa8iwnuEH3zs0Bs^>79o29{*>79g3KF@>e)U2K#~}Gkq=)Ctna6_3U-5kbUW(6 zT%ov3MyMCXrFjyGXadl44j@_w(L6KHA1?3(k+)78?ap5B3SS6$w4zrQ{g;2mkLSgO zF#7S&(-D4mFQr%hI%=px`=ue3tT6geY;EGd-Uk0s8E0GzY4Q7))4b%gD=L+zXZcqw z+W$s%IwHCt7V7w~=Pnn~7=uDG-Tup$mWwEqg^AC_Xee+ zxeivUJ>-F%&yv%LLr?*}`1Mt6ek1#dkC0Bf^QI@jp$R{!6>6!i?dC1zv-SoL;wj`Z zu|k6_-5on&Psc+IoNRQ}h-ECATv7C8fAZ^LQHRJRcey}AE%D~SMsHuh}3F+2^r9D>ihRAjt+gj z$v0oY4$ntC})cH2x2N=EsW$1=)o#q8SqxPRNgtf z&s<`1YdXS2?4Khg;^u~?ks{Bk=GZC0ZFO+ylLrYny@@omva4Pkuj+zIR@iyv8kc97 z+i?(;(R`-fa@TpD@nC7vcgoTXAS(6nsyyRVuYqr(Ohlj>;&FanX170dlQa)2q7 z+!DiwAi|g~e!F^pL~Re7Kk6v&62J+OZ^V4hnpT(G^jr|9s5 z>*$a}c`hz?pdcpZ5fFT9DyzubU-t_Ehz52zF*>jXmVOjJ-5XBhK3O?AIlWMz*3*s$ z>rU%q{lGd%^p7W|kdd!d+G5JK6nkXRlqVt~6?G>$gGJ^tBN5xy*3~)6Wzv{y8Ov)6 zrSuL|n)*TKWH!J7;?ovWuHkk)w)vL&VW9s6r1yfC{Kw0AI;oVE?Rj3jXfd==0?5Ob zk(*l#pb=|ZEBSpR!R%`o>PU&ThdXN@nG}nkzWJ#4*OHR0SIULypB)6mPLB`}CkBx5 zu2{OnynFYJ;QoCxGOgq4dRUcVBv4zX3m&GreOm@u;j6|-fUVm`Q{)e#f>~)>f40bu z^!)snj*dsxc6LD^)dK;q;O>K@7j?rcIzZ%OZNHQL)g(9ISKtB+|2cn|eutBxIxbS@ zNVVEz(Ala*x(Pfa)z0%h+~Eo#IIBY1cc4C zwXYC;#TjKFN%=eIR8UcP2FaAGdqqb_+gF5iW$+`kKQS6Vz5{_8!wOGf#eJVB(7_5- zy^=|yUqVA{BoQHTLG%FVf!QpAcrZ073Ydj()7lUNE(V5^5H=ZoCEc5u1pkPPB#=-B z>Q1M)z6~tVe@u48RM2*{^9y;NtDJMmvR|h{wcAyJ!y4hJ?QK5IFfDur*@`;6>vf?G1I}W+B!~`p`v5nL)^W!UK!gJUIv>TQ3$kY#G?4^x!4G@ zEc)BG1EaB_nh!MAQ+Olyu}z3qnx!{d}-jH=1MUY z>{RU4JjzSOYVRvd(fWBX*}0rlzd`rpi6&xqwqb93ce8H}@+uRcY5c}UI>TkE|KL1& zz|}&a;>Lv&kwRjWac)Yp>NL-|m_>^g+wukN0Nyn2%P-K3J3uzyQ!O>^CbdUv*9P|eF*?|kp@KFNMx6SUDY zRT>Ix+JCa`CjWEWE~52|#<^UZ>DR!OQ>Jg`w++^2?b>eAub2Tkn{zglJLKyf7RY5R 
zC9*!8LCo&C`zF4T<7{5l$!YE}0U=@2pMl)>gUJDpt8xH`d}|2A?TabX7f0~^r-=F6 zKY}j|4E3uYSj*)Uok&wJNMEQ-Ufg*Kk5dJuC@ZC-w_wLhVu)nmQ!<24s)4v$08Zdg z>fiAc&|n796s+(NPW+{VEzN$uPR>vRY5FK$}l3HY)|1!I*37WXa1R%h7C znxn-V#5#IZ#>)HMT89tc0}w!KfmIS5C<6!gY(&=D+$<+5`pG5w#CQY!=4fm_$!h~; zMi%Hhk=7jiK_N*L4}>p@3n>^%f$b}qpX=pLU;oM90WmWydE!T63Zx-D(WCvuqA z^e^Yq<#8A+D!{IUZa*t_Y?ZNV6yO)wn@qUr>*K@h8Xwe|VYkLhB48I0#a*rDabFYq z?%js~G9mi^&1q)C3D{HhN4|90f{}R^w5AmuS(x;qDPEUxcFM`fSY8lN>k5o)qj7X_ z7>QLLdzbOzu?|+ilsePtSNVAQom)G?_gVZ)heFy>stH9{n@X{jNIQy)up|QLr z^Xm(8LE(1tNrYVU86v&Tq)TlDE@a2_5VFRS?)8J2koYrauk6O0$Re1KV>o^`f9C4P zF`UR9<1P3fY}x-q*6i<)7K%?Hm<)u30edBvDc@))us*$>QCu7XT^!8RlU?j^AH=G3p{iyfRIgdfITc|l3zJu|i~xJJYUcuh@Fd2KugM!Q zt?w71r{73PglYMOjcnH0BYf?njQY|G5QqQ>qK6$iKY6T~66ap@FMd#M@`ip~TlNrc zm5&^5w2&I^AQHx4*LydGq60dsCD32)m-duUOtA^()ZzT*u3yreP?vJF{@T~0WHa11 zAf70pAduB&c}=}T{V`UyNcv03VqM*-y88S77Zm7NMCtjde1;A)GxMdtigVS^zH()K zFt1wFr1^r7j(YxM6xW_o+;DdBuENR@_U-dbG4drK`*&h4R8Qtx63wzMo9*K2NrnpbwzHBJwtFJl^*F;^!vN5p z&mcIeCtnELF5WmF*vTvG3VtEwdxf;y$p55+#yb)h_V$;Z>AnUiOgvIvsL9}UWe}$- zN$(lArpfCZ2~DUk1Ro6$dH>|}?iWapC66B5lPS}sznK*>)|m#Uq&AmOnkm(F1i4^$ zcP$adfI{?}&}#-&r(<~)d-BaOmycs|QaQEFg>$9<4a7S*&q(tRi-3XAQkVEC>BFE) z#>XEHsEcWrz142zB28yCN-;bHGTOV%kp{T~@0@F@9~J>z-YR|3u-n)l#wdA)ML*o; z#WU>=c3_q?{-(X6`F_F9qLbGzFvi+ijFW7vo@1q((^E6X^h8@ithg&p>9xAIwp~Dnr17fwnN*a@JFilXuYz}QrVa> z^PRgt1MXnav$~9wW!SteL(|Omws9I5v-vqyv36cgw5h|w_k3~4oraLeyxk%B`aoO3 zkL;JWmx%6ihPsU@;|Q7(6<0V+AWB00wN7X3r(=9Z5%UUprB2c-2?K%L%I}75BFR83 z|HqMTI-FK?nF-D*tNxovTWX7Z#G}!b3qi!JR5sZNKGtE2o|~RF;@14{3|f6G4%iY8 zxToY^o?*S*l7Qg*X6RhZKiHoB96R&&n7yuYLc7N8BDcZPNIlzD%dZ9koooR_S>`2K zhJAT1xylU9fH-M$w(p>BM9543^zun!;UWKb_>{Ug-LN%Om74;?S4=^?_oV_;2%SkN z0jAjqhlw|sV2a(fTLbP|FYKl}p5K-+G9Xei5~6=87)!eJa#MT7wZ*s$xEIhUzH}rI zM|L-=Zh%XCp^YD`^0!1EDxTNCQ&yhGw+#YwJ2#RUFkX4FYR>a()Iz+ovZ6_V?;I+B zxKaQDP99#~OW+L}>TPdl=I!osKzalYilO2yHv!UYxw*G$ev#7se?jnk7b>~N|7N&M z!aXrAxLTUtRO{lV0uEJmIL%PP8t;#w1F2|Y&zTdF+n)nNu6P}fmxBp;RUI$QspRq* zm(0s5(i!yhH$B_7o#(0?m 
zx3cSR?bRGyW>Nlmf;S0LBx_Dfmx)?LThWMhNjy(!Lh{YG~HSVx%0BGnHRIE&!q|J zHo@1jHVEH-HjKGo%>t!ndFLK>q>IMC)~I%snTYR>iwc-4Y5qHFvFEE}@wd+>NTTR? zL@j@DDYD%FQDh!%badl($_zA3>x9I{rev^Gr^6Q+5y_xbIuF1s<8JI$T>!JZDVVx~ zDC@Nxm>3zm{3GRe-%Tm(xhBU$p~0N44H5$FMXl&~ zn@^-)a=@WRMG8f!o?I4ZkS8Y973U%t?{{(=%~>&Sv?kz$uBKW~OEXKy+4g12&zj)q zIUH5A{!~?4LfG1e6vCcB_E{Lg+Gkkq$!;P2m~jIb&y+@ejb6V6FTRD4V!Pj;@ExF} z{>Kr%vxT_5X1*~-kVxyzuene`*t!9N^#is&R6~w zN2Pu032Sixla3r!Oe_Fq1I9GRESq_(L=5UxHuQI_Vy^3{v`c5}|Mr%p&J#w@m0aD% zyAbhP67)MUg(Da04}X+zR>f;hK9Sc)S7Mh0hsPsrb5H#O)#-dd=v>l6kHi=_CQOh%Z>U$;}!z6#}Q=yJs@FSr}_Dg}9X{ksdB7I-{h zX|{$q&UgLrVxb|zVr5n;B>i%vE2!-+t`C5)lWCa7vkXS!g0#M4D=lKN{sKXJYCep= z`9|*hcK(>=WFPfpjhIb);aj*L2Bcf zpv4pf2|vu5s~i$SuGV_@gG)=fANJR{*4Qcxn6uvFYryF5W+DsQ+J;{OdS6vn$w*rE zEGM3RUztVGs)Phd>YT{G&i=mTJDB6li(&Y!muCl$%yuJ~DI^*mrR6l}13n-=7RI>K ze443=WBw8t{Ni_iecwy3@*Z>6xIO-170j1(Wi~3Qx(2~;Bj^b$CYbMuKl1N?d_f?H; zwiFMR7;at3QKNVY_?e_nuRpklczP!7Rb4XKItvdVP|ssLP996&^(P$9cQjmmxtz9_`c%d}N4m+=5l1%W$sj0HMBN&z5r?q?cCPUl3*drI z_xL{t($dd)<%>Om``p)nM?C7rZ6JlL6GxV}YI<_JN`Rk#uaV-qlgrCMw#AtLy)b+r z0snobFex=gXa?+~Uh<4q4EBfF{%m=jk!po+2tvu4`4CfQd)5*5Kc{U?ju}Xj0(hBj zzkt>Kyk>>g^FVW+Q|ZPBE^N2_dHzpF<^2tjpOGD9gEQ=MZ)y}V(; z!Abu9w~n|D`a$L2c15cg?xgc5FF659CYbj6WtkB@J$+^k1GenU4B789Kr!7;I})Hh zU`{->N<)7D@8;tdoasbd4vA#j(F3O8oeF|Hu1ES$?9)bjv*NGeH9akV$#h}rBbhn| z)uCl)-`y!~A1Po^Pm_-AYno0XcwyzM8Oy%`EoHYDEm;8~_Dk*c$`)N-9d7e~ zhsI>B?d?He!mDeHg}6p_BHo+S2C!n7>-IAkodw{=(ZPY@ zkC}9O&0n}7p^$$pCk+P&r+6z3PcNYH$8QWYG$T&VOBaMDTi^6DRf|$3^@I8O`PVvF z%nKAAePxYqV{$iqS&nUEFzvjduJ$y4o*b-O=39>9kZl^VK-M;k^+3M}))>h@X zVfCdz`L*wCe;QaaSr^!=8Af!7N_eH7<(iIV4itJ`<^tYB%1C#M1%qU%!$f9l#+}0&V zMhsc~f;Q%Nfs;40Kj~LqFjUqp=iHnb{1zJb@WpR_LPsybrmY@%;W)V;0c#*N`R#u# zO4ORXg2A1DU}4i*6}?@LQPZCZ=K8%~gxAx&#{jR>S*fw{xh6nG@^}~l(6xPhm=7R( z`ttlQhmq?^!JQkP=ACvK>4J=k%?m;4SMszF1lT-uX_HY8Unsg#ETx6?=}N_KAv$7< zz<5K0_c~5_NVay1NEX>%68^1rTF1Ur zq%$dt@T=~+g;5q|OcX|URKE@&_vIjhb@x%9{{8P>rNHX}y)p!*N1f5*q~VGG4z2W*aL0ICMp!?Mzr#=v1FK8^5+ndX zstIE1EHL*FL4!3QKm^b9H;G5;%HSFNC( 
z1j+w_%DpFp#hL)f`Nx&)fe`XCUIgjgJR|gSGO$>hE^^e8k@Xj`rEXOfQMSg}z{@5l z{rMOH@xEc5_|VP|^S!Seopz)k>V%HXI?e6SDwfJ<;*_V7d&=#ygoaSUW4z*m+cF=0 z@#MGP4b`vesL{WxqyFNOn@#XrLz0ROWZ$3uH2Z{xBQ|`3 zHNM(@lAOjLx4xm_6KHfwx_*FGlqmGy}LqJpchdo(C@iXt=0Zn4%gwdAXhT_m$5r0CO3%Qwbn5~}KN+J6WQ1%@OU4cfY?w#Q~VJCTtw#Opu2`F_{t{*q`?3GxY4KjWF`Z*v1mPwUybyJ?$STF~#1v8dRi z#Wds|5YNMN)cZ*Q55ENdUd+$^VC!|ND);{>|4@CILIA?nB;C?ze|`gVfZ(L&~1qq?G@jy3<-J zb-L5Z3cBY128CV$xlB(^W zR*=5$H$kV$YoA4Q39XlVFYe)mjDsl0F~w9@*isSIB7jxTW2fE$0_8^l&0}kGJuHM}LB1ESU}z zb9f`8`LHiWI!P_izF>#!#>Oy;C9u@}k+ZWJmpQDH7Iz`c5(f&KSAM7?`Mf~e>9@$! z52(*gYuUf~>fwu#y{k*5a5UOBQON#Tvw?hiBbsfZ>SD=UV7T;*=-$1Vof!L>IV=5Y z?%?hun&*Z!@Z@AF_M;RZ^{NO=&<|EHGSJYLO{)W?UoNs4C;}1 z@|s#KSlWs$;u*F}MJxAzCJL#G{MEBf{4Pa2ebw0lHlrFk;%+C(vfiy4R^nK$%D~3X zzGF(kX7~cowYNySZ@XXc`cts`=rjgcgKNP^{fKJF9j|7!g*_wVvzZPJ*Y9$dl_@ML z+T7jMAB&I`JlrJP-pHs_#^=EQpsBJ{4@7UwsTv>u8>p-&+YAJdVlII&UtnWQ5MOBocF zLb`06m!BZk)Xu!05y|$QS}Af}u|uVivbC2EKkQ1)ThH!op1>#G)t>4B!}j)hayNQh zhfxb1LFrC`^>zy(v$U^!o1iu1?scZbYcNsBZ*z9a@9Np~YEy{m6&}8umLJ3weph+9 z_@5E9{cUwa1X)?rKKt-^p<>4gPC$X%Z%RsBLg%|?aql9R`%S)@b6C@?7dwt>xsxRT zF+hyIjQdTAbwQ3Jyf!Z`1r_Z+TGV%T#%mzm**7Glq^Jc2<1j8T&WDO{t6^>~&lkyd z&QT}(&$ZQ`adC0Y&CNX+9ZH!qx=DTaZUrHISdhug{hzf3M`_vPctKA!=p&ZcvQjeZ z{xN%4X=x2Ag+`;hnrHKd7N^&JSjWE_sEt%8sB5TWTH0FG8MYxO-@d8quP4L!Ka_n1 zSe0GV?p72;5dLn>Y}0H*YJWK*_{0R+V)>@ zp?S2~)c)6_8ALo;$jL1kUeZ(a*EJ76a@A>KsEyRnQpzGVAjb8R{^Y%;`uCE?43;%$ z`h@vid7qz!MZraBf28Ff#dQu2B$y$6r*Yo1Jm7#Wr2_KV`nHwvy zR`0>bAI56>MieRt44XNZtY;?Vmb$d8HBeVBhs2wbj~dW#E~hD`+-kUj{cH^VmeNj6 zAyVaB1<^2p_NwW7mAs;+d$cGH-xM#-$^P^8CG@Le4tWNN;f;rZYtHS%^PS3V!|QDt zAV#vYvzwZJ;aOC~h8S_)=dWuJLApgqC{@Zyy!At=@Y!8hJ;-i7`dZh%D+iLg9^p2{ z88-A65V9OmzsScIAH^ia=rgaG@*e8;TN!#g z1qib>o_nvAE~lUec;e@UOl#uO<%PK*>axe#D+t z-O%qzzzlAU6?Bb%7N26>pE_|b{F>t)@v!czMPWLe7J7J zlf#QWMhK*%o3k-t>Oa2<>C?(t!e+qgs*(AnM|q*+|L%)&c}fjt;Wj8E~bh zP;ICJGxoKqBRtY(#w-7b)V8Xzv2ZD6&TPHa>*;=Z^qZ(VpR5%(aE7It@e~hF zMCf+J;81iXoYvblks38$quldLWnOdJ^BUYCEWWS$P0V)7rYcK`W5@$J{juslcU>pW 
znwOgyDK%#)ZMz;iZ!OYrRV{`kxoTQlE9nlujpEy5TMsjB(Sw4^`%FqDK@iJuyR~V= z9z9_J<0MQGRoPT&ZdF#7g$=H{AJwy3%EguuvtxUYY{BU4W%0d}{zN74mW1{UHjn-L zRV>aQVN;}~4A%fymK^7!d)>lt5%-bjRoRpk^*c{aGcVOi1!mhQ{Bx7S!s7n#^n5P0 z(yzg9=)U#Qo%sf4r6nd=Xu~D!<9U7Z_giOdQlqqBt52I0t*FQN*pl4#%WogJB1;8O_rFdrVAl=rO#{Px!Gl{%A&w?;3J4`%SxXRRpmI#EiQN+ZNEkJf6YqQBl) zQ10vTZ`3f0MqfGpzJKtwU#~}#&0)cXe`#}$*O7bxTUNVx8VQN~kq!vr%=f@uw2;6X zcG<`cunF3Z?qJeh$NuLE#i;R%eeUu6M>!Tf4r9j?3;N`z76{9OF^I8#BuKQ>#{a{9{rOAe&PS>U>CE>E@yE-KcQw-)2*6?}bM4$j$X`45t~h+4k0!p2oQ*907S}*md-Ae`Y7MMUBo8#>z8! zd0fy}>gefplK`kfaVsG#3m8p z+z@T1fXLREY<%Og@^oZ`PfT3=laDkGR>;;lM&W+1(L>Mcvr0t^+6V!F04Zb`m?JZf zAQmS(52JgC8tX3?jA(`J&zHf5^sVMLHq{zpNzSE>iWN>QqGtkVRa5s9x>uV?uHv@N zFedCgtT=von*ze&^7EWFIk;)CMEhaf*57MLye;`037hra`=M259vmF)RYtI&zh0qF zm0bqS>kF`{0^z7^i|u6qVSpLfX=sW&b)m(AT0N5wx}y_C{G(3@D&nLCPJ_1RJ71(e zyX%cEI%l~4b5iW`<;w$@6*z)|t~RS@?PW|H9AO6sc9&0(4zxQNPhKBxR>#`59q+FV z*Lqld9vB!1g58v!s0(>r$+7An3lJtM)k=Vy8F^rwG5S{dEgAUdhtfv2Lc-z$kbv5a z$e|`Y#4xh`jVfjedJc1_4o%xd)a+a?-@tVdwz0uQpnljq$+gDzJu7sZZ$wM_n8!BV z=yhpgY09h1ps6RII>;;Cmlt7bYKo6GzqP`ZLTGNaT?MHnw5N1;%eEiGUw@(u{&9g! 
zlDsGgLC(z33W%=x7e5v*tWPK7}I$Yh^uX87Fcai z)zd>K&HMkvAeZ3~oPRJ#(jOS)=-uwFtWCf4psJv*?P=(!zl=*i75}JkNtfbOzs!Y6 z+>1t-LwtzT$h9;R{&Fu$mNENJ2+UK$5RI&Qn&0vLZ*zDrhPnj-X$yze{NzBUBc8t% z67U1U{QXl@W6C0#g6m-5Z)4@Kytn3f=g1Cj}8V&6-mPeBG2I4x+;z;d$|bPbG1#lvp!l+9G(Nemz& zf3wAnfD{URm*0nJs-Caf`dZSB8!f$0FA}|nD~phJ@`t=V^=9q1Mho^v-#TjhI%i}y zvcpy(m%h0bdIvV!%{6RAhG(+a7P!P`74&J>SMwc!tu)S2EG^6BRH# zeQUpSBeAHB=6CjU309Vt2RhYde8Xo(gIo2G6LOC`v*dL7k??-6vije#gxZrVShNsR zgSDby;#r*r3}_WHAHUh5!DWc*Bz3g8^s}`8Aw(vM&Q0xxoAXWM-j}YKc89)2imx4Q zk8^ijo%iQbuh+?Kd)=Mqs7_*X&b_x+U_A08UK+QJ;RD&svQ3=ooDE@ClWYBd#L<6D z`Hyd!ZsszqA&-uXT)W{M_RC0td8Sw(*6+-FQ(Fj@J5tqft8Zte;^WkgjJ9(X+R)8E zv(vM}H)L?sB^eu=@4RRLSK(-x(v6`Xi&YS4%D4^brJ~uBUo>{q^Lt!Y6)0_6CD#$V z)mYG=SV~APuPeqrxiYH(Q2t*pF&64Qttj+B8@>L_@dLwA+k2Fh%tm{XV^O=vdwazp z19lD$*rcwM<5t6e=ZwRWErU8^3EQ~)`3S4p%Fx9W)wkC~~Sk?2F#XgFM= zZGrLmIoKMY*+&sd3;-O9zV;U&Lq@TpjH3&;5qiC_R$uKm$bs*^e5Zq(dBtIbZNGqE z;u8#J3eV80DPj&F;e8Ak5dqYe;okEb6^aEuw&Q0(lo%e7ox9uHR9inEZ7=usz69zB zb)z$`xjrkL^my;waO{FQ1W`Y@0x1iOq=$eW(;JQTPW`Su_PP?b=sh2y7uJO(X<5of z8ENX`KeDrg?jD%X?lo7&1Yw5ose&H76<0(^SIyzrCgm&W>SL-Fk+&5&aL)M9UHwDjlEc`knm^euZH;2(})~?T2 zr?h`~V0kU6`hA{NGT$l>&jW4dSPH=Xz{oK&X5#nwBY-6Q7!p!D*c<9}Jhd?6k(7i4 z$zeUeWZsRhQ3oi;!Itb9b(u1;nUpv7hZy;oa~XN(iE1uU5tYT6KJGNF_*lkd%F$Aj zm_~2Y>6^GKQEguk^^qz2e0#dEZT)R}@-aEn?EjS={NH)RyMF`#gFme>RJ>k|9-G+t zida3_eS=8ozYsGx0XqdXSm_T~X6a%mCdo_1EJm`7iYJ*W1Z2h41<_ z=QTGEzXebgt)D<6ArY}y(IjF2|E59z<3TT6K8;uoQ|%9z+Q5q-n{RXsCyh^^uD)PIpMES zAN%&M|-mDy@kQ4p= zQZ%qn|0}l#rUGu5FzH0RP>U)2lH!kp7q|>(?BmWksoMAguOGghU%NKm5U^o#bxj@C z43O$&lLEMSLRxD`o4Fh2BdM8q7T)X($xXYgl#%a!B`Lu*LXHSQ+@o_`0)|ReFJEUj zvb}FD@$XOn<7<;i)|E*}GyK zTydC*vYwMaP`wEJzh9Q=d4)D8Ek2ND0`$`OGpXyjpchR1$(+vA9(!Hs0z?C0_o`*Y z>sS1Gd0~#mT~>xDaDA(D;V%bwuqE-aq~P*!(`XNuvo#>aM7@KhGv;!*e*GV?gGuA^ z-_p?+xJ)t6lDMG2W^USb9H+Y1Q1h_BS~TpNBtEG~J3;>Y5B`&*N?BGX3EFd~)l$){ z=2ccjrNveoS*K;7h>4U;`ADpRO8dGD7$Tj6?5>xYDxM1qFW`9d1i`(PlFWjZS2oW# zBIB2a;#{0jR4tqqW-L~Q&-LR?JVz^^zsW32Ol)8`ZlZqYPMQc%Tr}{s9cu7TB#n2h 
z^X2;1N)sE74UQY_evE3r+}niX)UH8RO3bL%R`kb}BiLWRVPXnMkbL!u0BCokv9bRe z|FiuQbBn_Zz8o0nMQK@r$+SkO4i?h3oK%9j!MIKIj}*RS`1Qu{FuD6>8HKF;s`?$> zczaqMv7q8l=oi)mgU`XV*awqhWFz;F5q)VR_=nUE3>1v3eLt`ENo4mG0s?e!RMe9{ zkizYk)JV+tS2)m#21L&vjiLq)bkH%iGvf`xo7hYxsn3;5f~tj%gQBA9w<#LOLs`N@ zYZ{j4=0p@1(!Z;fz9Z}0oz&S8^dlvVPThdLRA_w%aX3@V*GZ7;D@-B)f%b?>qdj$&D&h!}J}u^%x9#QjlJxdTvd9$zeC_4t++< zJ3lNUGpHh=EL#Q(=rSWisg@*jlzLVIre?fm3VYX<1UXkAS(is%1QINWDp*`#*KM9Q zfD-4%aq?s2kf=GHSpT+u-6ycQG?Za6=bR20fh-pmK50p5U$+0@OojM%@RE#$hHTu* z#Ml^LkTDc0ZHS^&H6+@X)SWd?NJFBdTjpEwtUAmCCMYp3|Ew@Uf$mK>b6iihgL3;U zwy@~Df)Xh7k=Sb7yu^m#$*zu-pvioUUTUt#{ zS(kygsDOq4Sdz)*!0=|n8;bpLytV}yY`4Vq@kGmV92|}m#QB4j(S0iALX)|fX6)}u z$2ap|k6vA1Q|`5k;WBEr9nYGBRQKaYtiv~xDkP?7*JfK45Ia#RC+T>2Cj}*Zq4a zRjsYwTeB@}Rt@wQm`#SFQ6@+031VesrKqs$bi`iY8nM)iI`ea~iEowk0nyWk57+Qg zPft$%C=dRVuS@jfrDw^@OlQdDaYFX6QL~MGs5M9nCE@9X910yB6~tZU4U(n4^5btq zv_^x~zXBICOE0I{+B(Z;9uc@jbG+^@+H`){wQnXlM z2ZCp}Wn{b&&U#r(E=@Tn2HM)b<>lo(S;MDnH?ofd>WS)y}IeQ%=haeO`GxhikdQ$hF0M$Nm=p)U;ix zK1;7`G=8aie>_XCF6}DcnW939lyito5(?5rOZw;jUU{V!^h_7Y&Ns{TCA)$_;o;Y- z`spermT}Y(PsS^s%sSA~zLq@3SkY}|T8`S8YDL_myuUS=C=KStpj8HW>%zdKh0AcU zJ5-<1NVqS%!Bit1wc*zN$zE*h-@ePhfCHS}h{`Ar)OD!hUz?37C~`i@53`i-M3wlc z7oKxjD6zS$OuQ-R?n(5qY-PG2#LzD;3a$=}s}CV-ka&X{F2UBa@Tg2|@iu9cN2grJ z!(R{R6(u--^BF3hR`aqJmTx-p+LJ%hdAX8aCgm#%?8NP)V%XmrIUk@krzzw71U!S z*DFQ`Dx7bG2&orC)EIJB=(US*Jro3CcncNjc_5R3y6>XXgQv|O?%iupw*n_nS_6PE zINr2ocAT680g1k)r6qSvv}OIKr><3o$ZO{>3vy z7Dn_UnJ1K})e)Ciwoqfz5dT?0_2KKtDrO2&wws?(-P!!@1Pk*3PH%2()HSC1*bl?T z4?<8q4k~SHPrfgP$hFFSJ2Lz9jE<8ty2N^|BQL`=?xEJ1$?{GV{^jL9wsqLZyY|mQ z4Ryt(QQ;^4Tn~=!K@WmLtiHaPZM#WW{>sEs!_J$+c;m6$&sBr>d`ev5tQ+~4y^$ow zL$RbTuWCESYoCIy;a#Ud&97f^1iy?_VG!Ujr`Kg1Mn^dYn^b&#$sDRB1-g(^Ld_&7 zGSd8y>-u2*$<#!vqi`4qJ2?>n;iEvL{F1Z3JN!13AQayMi1J#*1WX5i=$Chqjul8) zn0jvi0k3ozBWXUMOho8~1C5%Irj+8e^BWXT5(Mj%WbYA-de9~4$+x)vwz{ip?&$qY`q%z(fO%*Gyph%M-Lz$ zxnU*fCDQ?M&JCdb$$x ziNEcs7peUhU1e~gjU62U73C`e zQLFEvu)X>rF_-2$1C5dmI(kReOkwS>_M1svb|IxNSc&3@o=QtYl_Ybpen)b|s0hlj 
zPg|PVmkP3;^pHLJ5-V97X;6~MNgPM?)iQw>&tFM+J%DbGWc-!suj9mf)Hi&)Ki0!< zqGud?zi_SV(sW>Z&6O^~lWNKB$U9@NTIeiQ&~fA*8e%wNZ-#@O!TZd<(eg?`dOh&6 zBmLVMB|RkCwl7m zPH|(fapg>#3IPg`YB;;G8p*robZ`vfkH*n5Q%F=4oEgVY5%2gGC~Iu%8D_A~lN^0K zX@N~NAoS|mN8|d(?L5ECj6|Jw-s!+3w(QKbY@KK>0NMV)Q~n(@(5yR=V`3KhhaU3|lS((mwdVDjbby|9IVxeh=OO29+MJ-{FyuoqD;5Ja6 zY8tqPp3upH&^%5FLAq^P&0<9;18Vh4CR}2?hz{= zIvq}pXs3@pO;AIE1DYwI`0Ue`l3g%!q|!dljNn`&5jgA^x6=0~sB4Vy*SgMR#)O4! z+U|-9#lfGH?2zlB>}BxP9VSNb6#a@85-&J)wBK~`_$2ST9 zvlyEG6b3*V_Nn!I_?!t4^|DZN3!?}z`1y+zsARyhpc*hfE1rCD!B^C@P}~ZRxzEV@ zq2+CSKS$nwR{uihb2IT1EfYe_i}zgq}7W z$c|db4k73`UFQFnYU$$QvQVFe>gmW?m`v|F!GED2L$CjFf$2X(7L3KeE3hyZ-=F<) z0jOvQLvjnly}+72D~Hn#snky=@u2I*|6r(#Z@#qUKz<8fXI7PQo&cNbUky^SPl1v|u4*0j1$#Dx8YwJ^gWg?oMpo3M2nx7E^F7O-^lbk!Aj?*o zV68h4Iw;Ezuz+0(35~5QNvPDXZ(+KW29gY*=4H;~MRasx^+9%bA*kFgHj_-K=ig^% zt`yMc7Tu3mZOd}BS#M1`FpT8=TMMvC+6dKt|Cqk*=6A)Q#KOcRE3~6zWQ{_WzB2Kr zqJ}#hUkXNbyZf1F;S(!k21g=;8B`^4`ckRb4X zno$E>-q2bwm}_&6z{My`tgp>zpLcMu-_%;be_(BTk;QJ^Zecg;d&g<=fEh$uOwR}; zLMWvcdvmOwq5)n$%Qus|4Vg3o@KB6?DyXNtcG)cTzrx5F3;c&Pyy;JTfxk@=M7Jvw=nWs*rWbG;_^~+l*7cGhVm;8&l+lYtqwa&8jI$ zTJ=6oP}gh>B)vaXNNhY>5#{FgU~ZeAzl__8l5@OZxX{sW7V^QF_1ai&``CI3fn<~A zfO>uywBoi4m%-4vGwuukL=#NlmG{*Pjs>UvAJYrh3c^Ic)0!`K%Ert7 zwSMQ-s*+v1FS^6a0+r0$^9(7PwZ|7yxTnW9agnDTWmtyC!Qm$M+q;AJyKO9#er!l= zU%7VGelKdQs8n^-$}ODX)r%}O_e6{<0`aBPo&1XVuQ}8b&KRc~BrksgD6qXYOo}@g zP#S|`q;Tpa*H7PMYs4vAKaXCaoQEkb%``F8V9DTtu@6dcIU5@qrT+Cm;D~!|ZB4=2SZqV+$^o95(~ALXP$xzn%b33Hs&+pH8=WiT zx^oAQ8Q&jj_?DA3S0@@8WaV$~j^Q=VQ%yfchM%3CQX#p;@lq)69cnr3E=0@^m<$*G z+?o_MU2w2;%wvLL&giIN_spZ3+Cny$DQZB=)3Y>LEK8oB@7=4L=dpQs{#m7|N#pd0 z6p@{uqdogAi`!x48fiz2RB1t6{m));u+-rb3p1H*cuU(*7`$jgi+ z2_PK=luu9_14tQ|-6cfONkhyWpFQh9TM-ORk&1ayp~8cEZgvSW1Lpnn)P%}Z=hA0T zy~%yO+kc@fp6D*uxyPJM0>fWz#!F%6+A$17JV_J7mR+I**XzEt_5?@U&BYq~y2i#! 
zKNIcgO5V8m(AvJ#{q>E>cjY+IQ+;Y}t@=1`K2K{l!l}ScK#Igbyq|Ftwj`x=JUel} zNS6f!0`RCLKO2g%MSHPNSEpcr88vVC6LerE=Z?SFn7F>;?+&*<7f@haD2e!C z*8c%^#R&-yZz&wlnrcj_V6b$G&`~LC^1Lx$9Axo!dtT}=V7799-UCk*lj!6wa1BpQ z1YMzKtT(5e6`c^M07d;XXYc2gF1M(p&uXLd>0plg=rZ{&>l82Q*FfLGsf+<gwuFS%aoCJ@ZK|re{6g0Q({s^h09^}B z<^H4-Cs(m!xU%#ID=U401fF=@;5l+~&hQfU_tp|U-~toI>dW0b0(1^*UIS^bHEp+q=bM~-LcJ-npL6J zkhJ;$v-cBA>G{A@hZ0mE0Q?q35OBgpb93{g@oVu4XNxzMHCJ0jV_*M?34g=)TwgL9GAZ;^Kc4{1!cM;89npf@_LYLJipe9X|7W zpzdr1l96Uh1(dgv&L-DMfjVW9a`88n-qnyDP#hTiDNzBBnf%+g$=M1=0ZWK_hic{*+pJ3~13CU&&FDzgij~3LAjKLIYK#@T5I$7iueGV~yS> zi=CA&?f zhREt!iXAAV6}7ePuUL5A5V(8ovASZ811F;Sq5-5zo8;q_5(LN`b0MmjXe52Uj)Yuf z#@%#iL?bt}OMjwLc#1zhTljUdad=s?# zU@Ip{*J+$*Csux+g+!+2mgEi2k8j#}oiiLoN2H#SUaX1>ilA(R+^mepAM~Y(Kwxid zg)Rc8|31o0supO9CLt81{3D}J8^_y~#wI4$Ro1>|FVDpriHdEw`^7O6Nn=Q>Mf1fi zEQG8GW?vLCnU~{=6@Ku{6jxlxgR}RB!$2-l4KARmt*yVyeT&VB#SW zzk|nEIr7iS_2*+wjs$x)Hf3OT6~~$!dE>4jK-S|6+F+reueJAhL;1nNcGMA{%i;=1 zXqW-E@VK7w!3NQ_S$(R0cld+Z^tBGPJ2UAr)o(|Lhd~@*#_-~+0}+^0i8(g`R!1$FTcGx^pt1j&k-_AsB zFKSxrmuBszj*iLt&5(=Y3v6%Av(bNvh?q!>E6V(hIdfq|NC?uok_jc{+-URer?mBB167 zsU{Yly;0}o6vzKBekAl+evCp=SDy9P3E8u60hK}F)$vT|7H^E+d~S>b8TE>>x#hRI z!90EY<+%RL66r1$mbpatrY83bpa*3TENpCUiz#m4qBI0YWRht&QS~#wwI8oJyKPfq zr}sM{&A2=|`HVu^VbuneT;Y=fBsWjrx$VPq`RB#C((!{VK3n@;6yFc~wsd#<^=5Fp zxp{cBS(B${7d=Cgu)(SF_3mhb9J{11v4~P4s@4A;$6N`CAj8X0b&Rc zo%WGmMA3Q$CB7f>_IE-kTs>;V%`oiUP;(h9L3P8St1OyBS)!6Y)s9Eg=$T6}96pjE zaY(zm>~cmD((D1~WL`?7LaF%V+mM75e{1RuL2|e?{}`7PDLM(qh)j^&N&ouxON`eR z9lBZCJ7xQwO5eFYBbrXYZZzmA;Rk@*wr_G^Hhk!;a0L-lc;Z*-dKY0bEYJeOb#kb0 zYEge8J-;zKZ)9O%kql2bP~eIGC8XAbJ>17iKXTdh-ctHbi_y z>ps&rN(7mtLd`$K#Sz<%DaGv)x|GZ4A0+FUN=nY`4Xg(ep!ESnjxX`g#{!)0jrti9P zfZyHSZGKw#aPb4<=!JLD*YZF$Yw)BM8BhotUhB#JwoK&v zaU1AO$+6h(^dsO2U{L0Swuf8_CqjS1ag5t8l`0iSt@k03YCB!vhyI_e0o!`VokQ|% z)>k4)Z+*}M28ISKXy>>)1Mj>RSnuYKBdsmA0wrKXYWPH36x+d({EHkvl?ccPKOC5# z2%!W{0YD!7aj5_J9chi=Cv;L5_8V?hCAQlqO*xWrxFWs5LiQcj))v#rG(Cq~>B%P> z(}jFP71*4d1uKVETnMmaDZs@%HYe1uW1 
z3#FV#M^|4-ZFWXOXw4WIhwg#-dJVqXA}3-seTWku*bfqv&Jd8dQ*-R&0xjxSyws@b61RKWPB++| z#XYDVGE>O49glxJEFtFC$%a}ryUJnuhBQgke}1IW6LYpTtg)jn_f7xdHXglpvH4re zz3@)hEwMrOm^HhwlfSe5m%DdN;TLW14*R3YwYj%3F(_RuR7`}+JZKT4Aqus%wRF;i zhz}o7?HuJg_7C2zb)FxMCuS)Xo}t*B=WIu35fWlcJ$v>n{+M@7lHPjxidxS=X2a~h zK7@fg0>Zs<<5>{GL`^_(2rbtaS^ zo*m0Uw8De}n$7xPGoCsl2NSV;&|+3)9UW6;M`_;JL}6U9rxzY$Z0ciAAJ2Fy*&s0C z=BA>m#st{6L;rZ=ByE$3dxI)V@>FnY%974R)E_?(nwQ{gh4{nRQd7go#*!z`j){6Hd2KgEzjlG;|9zV6B7n5adBau&O8#O=;+}n z1;DE-+(xr=Jic}3Fyhc!cb#@Fp*14Q4cZhpQSC|7)P13SYy+xGegyoH2M63xZZR19 zeiiYw`RTxLL@ijCir)pDA={A>N&4J%Yfnu6 z`0CY4uKpnxQ1A2F7$*ZZ@68(j$+s;AZJy91*^t)dZkCpCE;l%gLP*$Z`7o?J-Fzc! zB#PF)J@`?8R7_7M;_;2cFjw zKsJ7cbKDUx)iB&Td@?jxjMdT6(ckN`=DZfC9}^M5_2GlUSfl4?!a2wK^!^=o4g8BS z9CsB}-L?irLYjkjRv?;s?22 zlG$D2cpuPFt_D3}6bYnlP~)*TU7!XfiwU$)0k~rCq_<+bZBwa|eqJF8Qk;XMBg>`U zr%-h~aHz*~DxbOmI=ZOCBl?b%FSmIle!ZV$%62&oBnt3hH?w+UNy&e@r)2ruCjywh z5)eRm`7cOEWST^sbPJ=4D(MB?5qOP=dr;W2-8s9=boOThWcODqI*0vboBGhF7yZr( z<5czq^Vn_YGqoQVfN%|UlgMjMdnh6v6EDGg(e3mvDmj?dtP{_>D9FY-FV9W)PDtFVG(_*Pq&1XRZLL zE8hsg__tpR-Gj2!IRNl!A8`N7shtjGQhy5_%5{a`%;NXfDzi`_nfEP^BR)MspK2PH z@$8_zz!+<48?Lo2Wsx6l766(dapdJizKQSUFSYia8&;tHaez8L`vwn48$h==vAei# zxj(w=HsZZ&v1#E{u%MmGns?`c&OHP`fwxf9@q}f#x3}j<#1gJ46knM#a^2A^Gxe0A z(s-sCMfu`pT}E?o+(t{!6Uuope7}uB_am+_F*2sNZf^Cvia)tzY9G*UV0*8*@CSkVLB#Q7N3$lA=>)-QL;xQOaNCbaJou9>UF#t4_;F z<9S2_Izrv-7)z#KT4;iFIGW{Dq%8ah9r-4K$0~0j*#lbSw{IV9Y;Dz0U@$yVRdps* zzQWk6;#OG5b7ix=P<^swIgtf!1~I`@$mG^#M^Z^d%bw*}?GdcTx_OxY#)UJi)Dcfy zJWyj~08!dHJY*&$ghcY3;ax%kQAB{O#$H=YQ2wbA;?^)T$7zrJ^M<410jr!naxb(y zK9Agyt6N)dxQZ7EsqFPgA5YG4Ug2sl#zEY&)TNfEO061I^$}rD%*nZw%8|NJ<4eb~ zTSCaZTVep*%cbH{+eg;pLaxSE_taEEd`a1Bc_67={aEsIwtR1vorgwp+ z<0_NFGgN6N`i82+t_e3ireW6esibbjij!T z=*7|f@w%G2(#?^)PNDRksa-BLQ!7VrA6G*QN5l0pA5jtqGzk*t%P1~Gfk}E&MX5zqXPw> zHr8UCA)&aQVP<_fLt(1SMaCF8za|n$;(l_1q*y`z^n2{~tb+2!hWq)$A;RTP4jX0} z@d;ADe*OBu@N-2VOMXvT*%8xm+h)4GU%*Y?kzkptEbv`T0xyd@u2h1=i(y+Q_ow~O 
zs;LMoA3pI!cii5qqU#+&{+?Mu(i(q=w^n|9X~e1@DJ`S?J)XJ)39?X_6k`6z+-xqSm&4DKrCcyhY-g&tlUl48B!ChVq2pVP|26B%C7H8p64$WY+1|>W4V2v*UYkDd zQF((iU-RxiF2s$88++qMhv(J*`rp2p+uI@2b!L_MCC^+-!h6-|;#K_4-Rll~X&Yw8tSG|;~Dp)j>^GCM|I`dDTcRMX`$m<6vs7m^n&8`2%#s6!6PTd z))B5})2O#e1w67U4=*Eb@oG~}>9y5hOGNAp;2(V)GjA*D(NIt@J~Fs*=G53e<&2R= z^2jviWkGlH)57G^Lu{tl(xfmkd*8o|WnL>%f?nc1q$rWbfjO=g*2RtGjMrRZ&&Nyko)N zanj}D9ld4`7e$u_CvyQeG258|ArV=j+*5{#ddxL;A*OC@&`x(fZO^q6LEU{DbbJh;~O zN8?mE44yrI0?TB-d1J1Rbh<%D;wuFe-M9)>wW9A1Lh-K%(|uydycL0?6TVRdjctFw zf>qI4pI3&)t9U-y{`Pqz-FEN!`!sit0$O2)VCjvwbJ=5qwFpFHNAF#*aj>y$xOpjG zN=v_iFV}Kz%D+G^=h0Ar`I8U9{P)c=XexRg#nF)EKR<{TVqK(5XTJZXubF~8G2qt4 zAW$f@)U~jdd;W|z6q=IvVk4rL#RSn*@H_BZ2w-%s~YW_0-j&_OMRYv`P zjcT+Od<3zlI{zL{F`0D;FII%T)~*&3UE8f)vd79e1OqeTQHo!mTuY!AOK=yHv-6b` zad5Bk!}6fK!t02CD)R8h<*QNm<6QV^|D!QQSf^7%AH%Jwi_*JS(>3_8SKdf9C#&hE z5ebKur5Hmm0iMnZk&nkBhhlyQu~JEZnIP{fTD;2gTjwXH=p;Otm*!yX9xoHSk9ia& zV2(~PJQHVjIkpTJ zcvWLvwklLIR4I%5eh;v7<)i4D-z3+FNiQ}X5`v@HF2mVE6<}USFa6=);j!-F^INAN zEZpmwlZ&=%n2tvnl%q{8%k?w}vMYq!tLe-6eR3+I8P-p3rv* z6ri+-N^PHBi6WBqcJ_1+&UxXp6DGInAn6!p{1tw)<{O7FGYa}i=-AKO3J)BqXRg4< z|0L@3E&S7!+DyHFAo4=xl3+3ytmw;$Cz$xNf!f8hoUTZ7cSXTdIreO!j1)5}w1Ar< z^ZL9Z7Z^;ZOD)0Mm3*q`9Wlh#2yQ2IHjQjI?8h<7e=L>WeP$TQzR;jo_uA4PwVkRx zH&VrPim~-3aV^oxK8XC~dsA^c4fFcA1Izc1VEn!QSfNI$LCodIs69rXg19s^Kc-4K zQY|^ z--;-!kjamKdP{cq)g{C=zpci-89u7O6eBbTD-P`C3E$^;Qt3rawz&|-Vk6fP;7)E8 zvL}wC=zSkHM*U@t<`9B^7wa8ezyk~qalt+-+7%&<-l%MT9%&U(WtF!-Jv5N_e&7H5 zBN{K=rmwj@9X|+Ia}+%59~ewg_hm$1tXpSYocIHZGOhNKDT$uOYbKwJ3ywv9MLyY< zVh9j~2Rz$Ch6SfrQ7-ci{%;@xob^o=WasU0`rbCSh^Yk5SVWo9z|b=OPFgyw6;$7kvZns+P-Tq}`Rm@I3t zv&4cca$6{gUSwJuvgvf=z2UUeKE)?9PwFey>f*x&4>tLeC51{sb5!0|XOXBxDN*fd zk)11jA(1BPqxNt@>l8f(a{y~sgb%^Co7OCsO2DO7rcqCeffIeNOwhGlL$FE1vWlKx zNBQL83zZLIa@^p6@ntRgass43U=9oBqtL1R6?gL64VY^Tm>)ReUo2YUhQG#lmH^L6 zH%vFay@rMOX4H^*+?ex(Kt3kAEi_LP+5Bh{MtmK+0 z^YQzq85V~5y12nfhCXx?sXLs7(#_NsVj5K?xt`@qShDJG!V`P|bgs4C_GF`%>`{{VMCLlA<-g z*Vq8d(9m|nCxrE3g;JcU1j*2Pg)~)!cW^l})I$)11$=kcLR2HPa&()ElP0kzEP4jp 
z36Q6k%A)_{&>?zYBz%YQF0(rGBa#_1nH36NZ>jul%6Ol)t}XjEwn2T&tu5~Rv+g^O z!=fO1bK+>gs2kJ%{9^j?lgc!?0OOxNRZCNL{uTGv{7kXYnE;y4){$7UH>!xLG#x3I z7$4uquPO)=@B0>@{yyRla8%K%ZP{>j=K>2o5H)zr93=A&zDGExYkB>Q);D`qk>}m5 z|Km-?pb-t_4@|ly>U`zkh6g`E0QV~zGD!di;Qvjr9k2?lC;YKusdLoxzcR5dNNsEc zbHmbbmUj}DOclub{WK1b3q5qF%#PWn%u0!6ZD|5RZ$pw1@u@2o8R1tlBg=Q@`V^m6 zkEoaG*MV8fHusrP)WN`en*PZ#N0K_Rtl)#$=VbDg8$^^}(@z!+67&g@b zaY}Yj|1X`U2|@zYQNL90CHQK;NMkoiB#nddc@yFD%&-TJcFy6qR4~6)D=N6j7^&9F zOM!84?%$BpD3K;jcPWT&p zm71$N2ceOS`JS(t$TseJknuU)o7%>E^^)V}6GeqY3!9*Z2QSI4#HXElY)eIoLPi0g zK#P}UIb5sHr0z${_0nqEQp z*L5-H``2O!N9d3~KDN$`KlrVGP8@5V)QqADp`>>F(|ZNlEDjQ9zLf>6Y#eX{5U(q!H=5bL)4$|G)QnF3&l~1G4vC zYtA>v81GzntWZSB%W;bX(xE2Hc)A(rr`=}C;P?e6-YU~DwE0-?U09empanLnp35cN zEXdI_4foSvJmN%CT#J;xb$J4z6tE@U*w{7(PabISI;GCgt%EI4EI9>83h6!9fLMOW zl&RkvybNC{5w6r zU2huL$6~j5(YBfz3`HeYT9yaCa~t96-tI+v@jAePBeqT0)Jn|Jbl>!{tnbf}J!GgtT`wnL*l_mzlXMj4diD)hz;#oV}cQsTQL=9yKG{u+H3uRJY1N5J@boi344yb%h#)R0+u6X~&9lKt3_ajb3*Niv!c9Td>S*K>8VMp|p2ogUtCF+D0*s;MF`>+gOdKf?o~_B>GQ=odeSW?6}Gd_T+m3 zNu}vEaSRbBI(RlOblo8xTb~$Ru3lA*+=R#D((v&St{#twiHc(S3sx%WdmJtCTi5Es zKYg-U5G+D%tPfE&@{`riFmlF>7pcPyMnDrcGc&8Mf?wT615FmtJBEJZeU)`$w-$Os z{tYAnqL*PC-)qT7$p_zU6(-9WkV#C=rt0oQfu^sNMZ&;zIY$xFro@$!+Z7c+m3 zgN_lMda{_y;y!jliatZ|loqu=y7tq{5Xj1|@e^tadn1ysAe5koKI+;&64XzT{;WZF zA$F(5wM<-qhpX1yifq@?t}p+GbliR=SB*LO0FdwAc?sw|y11Vo+-Q1P$b_-It3h~} z1%ST+^oulokFmL_#IrV6r7C#UL0x#{L+F%W){CgbN*Iff+E0k8LAY+y_W5XQ>q+o> z%g7tES{Oq^4xp0k>h8wT(Aep*0RI685g}J#(YS1v-anda*cl|8m_z;uoYnuv$)7zD z_rrMgSta=MZ7TrA_Ci&8Uqfs3+YeX7&-yDJpW}SNgcr&4JgbaR4f|GugTCrH`A*E! 
z7pJuhpRFvE?tmF*fyZ6uvs9sUSC;&$nL(tqHT(9it-N0XKo5%IeHIzfF~t$HX{drg zh?`%qe1|3ev5Y;vNDNvbGskvRExiHWyqKmo(Y29_eu%zh%!7*f&5!gHk zn!}nwf8E)EaQ(T2#x(!bjG-Gq2f)`FU?*r=%!MeDZLAArB+O`L3fklSZrh(i?B^QL zhmy;CQf=%^*ofB-56#X9U8A47iuQqeuiLC)1}Lq{zkrO#o+v6_O6kQV;turMG8dC z0&VY{#X#U#_e=b82{H`iLxKeRc|wWu)r3nLzh-DcUAtI^|tgqv1~14fF1tbf{5Ka#@GuDRzUjY#;Q z=P&kO2}p=hh`WDwynIkY&T?A+f5Bx@u;SBwb*#AVp(Q0K!5mk`t&)|c@^%{6-#racn@cB{bKA(SqI0N>AnMV@VA5X;Os2#Z`8Ti)M1 zNJ0;`R)9hv>rH6mb6n#G%bT&smob$ugY?P@K^P7Ms7~{cP?Chpg?W-hd@!4g!E*kI z7ZjldrJ2D9=xul1;Yz$EjeOGG3Lba}%KJ&ye5NB5g(|=|Vh_mbiJ5_h1^pN#v7nQo z(u}n!bb42!BvF0IuC%4BZWF_&lG7I^>6(`V-miJzBOF8=!&&Kq_H=?7P{-kZ^s9U; z>(RWeUoiKK1JAg$(FV;T01Ldo4CMSUL*}6QJAxg_8l6+!{9vZm*6E z*%`n2j%zE;VU|FSNG1VU?|)pw|92NDLA1(A=4**GR~%Km+q!si@|ze5JRf+e@kIFN z&m&lfZ@VY&-u}Mxm;pw|q$~V`z_B7K#Ze^L%l0P_`|CSSeS)__`BGyYuULujh3*J1 z9^cc2nejMX;|ARCuVQjNyoqVFCp~I96ZUTvPa&*Oku zY_UzWFB0`wAd*Kaa=)l7q+r{V2ha|+DjrK+;O_T23i16gv`D?Gx3WBgZ>FH=$KNhc zr}`W&2M_WNd;?OV<^Q#8+twfPO9XK45|8^sLO&34v`PAGMEp+{jl@Sf_`TZ`I;L$C?Gab$}p!)hg!+t-h#s~A32 zTW4beTy6JKj-Ro0cN*yAdHw;8-6;4fB5LKeE$1Nb#sb7oyLGOk1C?JAloOCYUds8; zyQJE{Is&)}RCD6KuTg;k{%weSV4>hMN)v3O>J0N(rwTzy8Jz6$b*vAy71j${clAGN z>&tA!J0EUd>-vgdi%Tg4!-GaQNv7mWsUF(fkCzlw&L(7*0ADkeiXX*X5eYp+T&%k^ zlq;*wkm$fZiBZ=XJ*#Z&G$Ff-s({M_GvpA{R8muzmACk#160wP3Nw!<|zKR zy*$T2SK`)h(GJG@u2kEg@Ar)~{^9JHndQ;ni3OB=iyOZ&CRe+|e;vDf`)=tH(EIMc zy!W!5|A*amezX%x6T;own$p#JM*DVQ#c(?kK#UP8v2K875&`B1I@+BBlQ~RSPJlkp z576TU@a)N(M&|?A4ZsMXkFT%6z+%xP$f=oJ1on`{B(uy2D=WDStxZ5ti)&ePe2!g& z94^UIv)JW0IzHo*aPFf&d8=S$YkxF}uSLl=XwL04oqcN3UWSpKIcmjWe1fl2wtF0d3v6sdqt;s1(_ zS@v$Kp9~bh?9Nmnx==*N``|!Ox4x>n-1oWqZ(F4XYT@V7_Uydlhz=LX9ItyfU-;4j znjs=dKaHOkp|$bcNXZYsikaj;gHUO#EWb}55_LE*Q?0h59zPM~u ze&tVJ9TSZKkdONt0rZ|;Z_1vIF%Yu>C;-)44)4i2>>E$E-H}`D;stFS7EO{rv{i3J zCufLrgYg=+JJ%dh&M?nup`I|CFQgFs1pJtgz!M2rXH^m9w3e*jlF7K%OLIL1Mh9** zblvu2S-lcp(0*FJZF3Jq(TqTRUUb@Hl*b&O` zW$M|MOzFL7IRC@<#b5D%kC9bb-QGBW;gj2=MFMReH6RAS9|Asnek(uU3mmW#JI^z| 
zseDt0G9{qJB>!6yz_qT3sMxL`$2oegY&mxo}13@(mfGZFI04$ek9H{Ik?G+2q+Ne^Ew-KO}p2;a>Jw0&= zpAa)4>QD{MFS8XAuS9sK?ULWBi_C?1>>|FK-P^BpBBm3kD>yT=&m(jJMe-D)I)eW2 z9}17E8buQJ{Zd#!9z=EP+Tr?d`>@Xa;CPlP)jz9TP16AMR*%Cy5HwLpoVMC|;Z5>@ zCdkoXtN?6?HrE(@?aY(t?bOQl|C>MQDU=p)sgaDMtMLL0A3+e2o>#AqPPvE1)tkg7 znWCKR1N9Tr(8%OsRvnxmD)5b89d)QagRAZ$H-G}VVpJ2il4%@mB%2uQW6`wvzDF4m^)Vh4yZuxo%F;Cgu>o>z83Ze>CSqT{x<@M*n6NhGVPYGS_omup`f+Pm;6IH`7AJ|B8X$z|IP&15XlHEuk4yO+it zPkj25Q58yvW&=XzYKQ~^>8yXd87VSa4PbZWeSICgs2j5Ejn3`o^Q{|Q3|J{hFhPI~ z;(6xiP<``3$gh!F$_ddGQNt2iZYv;yv&edNefk(f$bqePf-L~>HJ~~(6qHr?%4ZHe zLBKbCKfY4MyC;95=3Rg7`&IY|q+Wou&NCJd0e%laVtwGVPzP(eZuFt$0^r(oE?4@! zj^?LFjxxzzY1)>)!D%V9AV8ca(-u#*lF2qQm3aIElg3&jzJH{4ATMZ`TD5E)AzD27 z5nZtnw@A>@n|Jo9O?Yg$%kNTn?pOb1$fJ)v!%V@ONY7XrQpHlxy^as# zEB}1Af`MomVnywIe2gBFj(0iAiil{Me^)pb8#PfR zM>>~LU>wn2%_u4A{P|w@fbos%%t=IqHDurV{Q16f*vVDf_-H9Fyal|tF|A`!OcPmPOx2R zSo#@sRUzM&ev_x$?Ym;Vn=F2M&aF`LPjsMiIhS(JJenpH4AfloNm?7fEErEuK}Rio zFO+(Y9Q|8G)tm4L+cP9g5h!zf*yO1@n=n>gehlbcWD=;tKprnEq!h_zkGCTDiQT{F zAvSWL9tXVNE;K>oYi?T%1gGjDa#{}m6vS3(=UHPMf+Q!)m$FT|lu&R-O$$eOf6(0z z0%+qXIRXG%p16=)urcJ_4Lw8n% z$F4t`Hx6(h2ZYyvfI76Dq=EMid4~a|$0>!MBdD$(Ii1m{e*lcJ#!~zE>nL41_*LB! 
z$A9&XLH+qY>&e4Yo2%oYl2dTHs+rS@zw*Bjr9#Jd;F8;RJ4{kM(vZ-?%dj~l*Ly$S6w>4nDD-1s~pHGTG`b1M-ZD$A$x>h`6of#o-baE9{ba;Q3fZ z$OICET{kn+t|5yG0gi_)kxG$OI^;aO+V;<9w2Dd3@w8~i;(d1_>D zZ|~$LwZiyEEmlNydT3Y99saBk5Tn6t_L=RDXBQUPJUsSKnr%S+h|SR+qHplj%<7`v zYyVKo1U{ZLABvVxk&#w7fB!d-j!$|X#?yONr}r!RF&FKq0qKvt zzz{>$eeo`i7K39`g~56H1TfG+=Fgt};_|I5#5AqubNBb2&O}o;mun)xfoGiBM8~g; z;bYrLF@$}2L8(e(a%ALuKSi=uK^SX92 z>DWZDX4mlU;kM#xG5vD=LS#h7w@ZF%I<srx z*}_HAD{Vqo)|*ZEdQ1jFr@CpwHa2(vX#oWMum#fbAKxU=*?t@ch$@=8EVYNuSYnzzohM z+%scuufWWMBhL@+@Q>Rb^BXW&D-s$wPYvapJdkbe>`ryI`$x8`3h60OaE=4x<10q4 zA|h^ZG?S}xW}2v7QW|^ZZU$cfuJtk%(B}72!+Hs>iFPASMo>QiH3R-gNmy6K%J*Cl z!EGrV0JEHXS4HWE4$=gaM+Vv%z?FmIl7@?_P$G#DBs^%XVjwg3snXxaO(rS%2ylu$ zCkfSEDB{-7K_i6nV<#d2yn60&&m`5B?*cK&F2t5!p=3vUweCTuu-Hrq#8i;&8w82E_YMr1A77)pa9Wy74}R)7Tyo#J{~XXSo>o-a@$$GePVQ$e)KV7HD&~H1 zs(9QHW*AkI0$|R0XvDRO?UbDKrDhcGif(jR7{arPD-7UuPayJ5Jg|?KV|gdU?ATy) zX%*8|H^-397M7IM;#vGjXN^SA_?xT2Qv$*f5fQr<66@DCH&Gy0w{X!|Y%G|W{&iT# zEtdj6{omYeP6WWbTjoawnE==U^hTrx8CfaY*XV}Z&8W&Vy(OZ}8t#5GD(iZGoxi_a zx`)J(GW6LweKoG!H)hhiA%%q-F;XOY$pev9Vv%t$U|x}zlhT=2fr62qt9iEWhN(Aa zncHRyXq=UdR!^a-N$KGA__yb_vv)CMB<~77Xl{IvlU{Gd_@NAPSGCo0Pv8X4M+ux#Pvy2YkgKv9u9I%TRX zS!n(ZKncLaf#@>ZMkWszgTm7qeZTq2-bYc)GIFwHozR^YB*KLQe+r{ao{|QxGUG|N z`~3RQOf>J0jm2^4JLH&U9RU9UAJZ$Up?CPT(62O}n{ni?2FktNb!W88uIE+r(AOi? 
zC9h3VeFCD;rRC-WO)>_0zKDME0Ql#5;S+On5eED(Pu6cUjSoJ%@az{&(^YQw0zhNg zr>*;?Q=XaS5~=Tl6av^)ICBoCmXpjRz+gP4%9l*9t%oN=?L9i_HNZW+EU!Oq@F*od z#_ieCK-T)yXMH&Vg*|J%PVJ9TbmRfZpPn+heEmvT#c8=wzZ$n$WR)oj z)a2hh9b^_rC`>^PDjS`Se|oWAJFC{SSZ%oyQ(9VaxYU(>FJ?XeaD$_yrmMhh7+Op3*FInubPB`V`BfIG`{o(67 z9SaLfJGAub*EV{i>FMu?Y6IX?K_E;WnP_kcguDa3o&c2YZ#<8?Jo#U`v>be1W=Fjw zj5FyD6U!A^wx0)j>aN@U9}ozTXok2S`^D(+u7u34coEev;l{59JmQTB*W*Ge&35V` z@8&t3I&Uey*78*-G&VM7mzSgKwLg%8C=bNV7!*!MX+oZL24}8A3a3z|cKb*2;5PAa zs3ujQ75zpePS$5k1dOSd>Yw9+q+QAR5{05{OwwykWA1zz9h$_}){y2d4z*tS9kvGy z`@#HJ(Y89%S4`)qw<8C28ndtwbxvEHNeVu+=bZ4xxp;n>(<}rSxJtI~lk?qKFp8j| zi5dJIpBs}d0aZ|J3xm3Pr7*8fSmAgK;9G#+044~U#yPa%zeTv&0|sh#t(w{l6>;zi zJO+hqb05QbQpe}Tmy2xklPeyw_p!>*KlnnP1_;0FPrf5@wOo!eib6T<{gh%# zQkcJ#rwVzues^s-gigW#?7??i{yEndwFbAu#v5DZ@hw75OH$C@( zne7;`VM;L8;na2kgBtrEWa}xTsQu~!8mav6C0lzP~)HOTx0|kaMP)1 zS!Lw*^+^-P?5zLdhlOM*U%^4zR_*5diqve=_|0ye=3K4DU?I=2FEukcu~dxuD&B#1c$csyxW!Ut23j!df0fkxWFadvuoVad_jTGTIi>qcdy_lsqc z{Q+)WRY`U>%vhPct;gf3(NIsOUFt#mud4$a$=nfU$Q6qf-8WB*`BKm9{QS*ZqNxou zNC)VRI!}!p7~6Nxw@>;pM7GyE!;kth9|b8oH(C4JADoEt`+H2Tqqbcsz4lT9OuUC< zBVroJtH6PfM-KI7^ONzpyPtwA_h;^0?!r-te%Q~ibtuvXKg+R* zv)`%ZZ#4`Dhaiw3xgKNcy?Q=;kD`vT!cacBa0eNQmr#onc(k>1xHMbtLxLbQr|b;5 z1wJM}&v_WiJJdj+t)01(d)E~on34}t7p1$}ZCkAT)MQApGN^s-6P($F?UEYH6s^TE zK?VYC)i}OZjW&B}-(Xo-SomD?v88ii zz&{CU`BD4eA=={n>x1ZrJHSf#+;5(-G@2(iWI|sPfn{h~l0FGsO;8)j$VlYW#UvZY zq0Y5!kkp!3$UDYs)#8(mzp$z*D&&MvSbs^zL`Arz)z3xO6&X6bM99*r9>}|@8yz70 zz{bH75F|S*-C-BK$$Yl%bphK?CXF0B}ZnD4riQu~s3)WZG(TR)>sEM?VSINbN+EA*n zVZ4QsNZih>APNYCvr^LacK)y9lh=N)A)Q=YVAZEItT_&+_%TBI(*kU3ulZ$UblQHh zJL*%mwAeFVp!7F(y%>;?oLa5?CeH|vx~p66h)$w!>PhQq%1+PKLrvWQ_r5n4@){3D zW&1}XKpJ29>7jED{gt*v)Ytwe|16yM2pyz3EE1Gx)%q(1z$WiD%mQ*U7daZh`9kp- z>Vli!CCm|V8F6?+vul8a&-wNZ7TPj`8;4FvP0cR)ySyDe(8DBpD+CrbE6w@&PybKG zll}XLnm9HUpg$>Zv&G&XZo~6##piTtK|1o#9d2lrJ3Xe$=zO5KFa@veWryuJT3)4k zs1wT|2j`u$cv6ezu}!zbz9%k)ne6M%JjfWEgAcysfueAg2d7nEk&D= z@?#I<2Qrk?gUKCqKl85 z1FV7(AoL10?LLAhhCVv~qJILaGM7ff3JJ0Gu~!j~C}HSiVVV9nFq}?l!Jz3pU3BV) 
zR!K^N54gl6B%o+@Q_m>s=&3Bpp2!I6G_HIp^RXhf2VhqbjEP#NPAlXwgBgxlz)8x6Lv)vi=t ze!t!d{<(8bzg zy*bmz#U<@w4M$!)_XR_ml=QoApg)WuA&F@5^I-!;7K>r2*hTpvBkXeWV+5+5004?fyVnEv=w{);2UB7`Z;XR#G9--{@-P0;(op zUQ|qO)PUXZY%F%AXM7puVg`589-{zx*U}wgw(#ZZ-6Qn=+ ze6KKR!;E4Z=5!+Bq5^@N3~DGvi^b-<);KI`5enE7I>w|2+wcyHI#*(6J{;4S+dq8m z-Sh=MoUa($2UG)rDBJ{WAd0gK(W39qvg7L56Sa6^#;g1!6v{0#Q2qbo!D~_Bo zP@J6@>fyWl1yt>8ln-aC8em9pZUdaNGn5T`Wh6ihZDzq|y$kPmzPsGo0p}$;jKp_3 zLnm@2-KMzYn@td4;eso7Zb?i1&P56`km6|&q3JSnd ziW$WMrvuGwHh-y%PF&OE=K5RcfFfapD;1cM_<|V&sBNB0)>oNu5YuK(z>iz-+csCs z0*@mdVkTe|N5ad$kt?YQ^fIgo`M)L-y%+TX#RaHB=%^EvpVVyFcW_;ohmI*q&0b%B zcL9BYIE8LdIR0`pXy>NqyKU;_ST)wlW@p!fF;@Gmh$uFTBWI0! ze-l(vgA=V5A6u)#XX#MM1qnfS26Zjy6x+?$i#v2Uh+n(lD_&JQ1Ar4aU$A+{ey4Ju zGwK;NnBoOg3u4;{2>=)>tqmU=-iZRe3K5XRt0ep680-`O9dCoFM8ip-A^JOFh?$8E z*QkonL8_>BJZlEVipaCD0oDAMiX1%rJw6^to#USU~MFu;hTd zeqZ|{K2rreK$DA3sTkVjI)3pZ0manMgY`eMu9mCFppU7#C&W?-=&h7zBuuVKV$01p z(wNT<1QST`YT~x((A1toZZefZd7cze50yQPAB{%?@|RrrgXJ_!M4=skslgZ_J}Y?` zEj$>%2sB$QbV#;~V(fJf8J7oi9vBOeP}8R>hZ8Nm!M%~mgKGV(u!CkmsR3F|!awq_7=GJbajXl`WOK1TIPH;-@^qGbjo1d6P>e4HbXjwoz5)y~HQBJX#@km+Cw z!1aPHLI5yyfK49k05flBh+vQf&`28&*YFfjVeZ=LHVO$?1*n?E6E(qF6i1g13C~pC z%2rpOj)!3*e#Uw`I(A_r%5wimVF+ll1C&tH!K9uBKo1qho7i&UvynO#NdZ;pJ`Hxc zXQX~)SB?4e0sqw*%#!aSYMn8-ZcD=CY!5&l=IVD(hTi0Z(Ho{OV@`cISQ8yMm}R@l zjL&mNNfS+OpXVHz8A#>Vx&y|n7Z6)>smZMVYO5>WciX~X!=wt;X?+d3S`Xj*$wLR# z3|p9MD8H6N0sbOV=-J7Vl5w1K^KiQ(f7`PeF!I&f^_Pd;_$BW5@kEzp-t>W|Tz4}F zh%5l4l`aU52d3`l*ejvRT!ZT%O=K8SsZPRHp`Z(y)1~%xHJTh00JXd`P^rTK77{uD zPb&owphdE>7li*tfd0i6RHiU`C_rwODzK|7RehM+K}Te+HN=_j z{0w8BQRn1`ocvj-@r8)$#4T7hI==)0d5KIKz}BDc0GEesuS%qb)o}OX0m|^~tOyPu z4bTA&wm}L&^=>R7t<&a?_KX8l00$%ovoTGE{newWMg#hGlsrm};U`*Ym)N~DAhdsI zX^O3Djh%e~ARylUn@b=av9O^_69l!eqjWlwPRVbmE|p*mjD|{2&b(p<${r61X7F1; zntXaD)qwW7n*@X+Aj_<4FaV%70Q6KMl4q$n8&zpFZx{yXxwW=Z)@Z`aAg&FvdyHbh z5B||P(Mf)P8{WcA{WKv6F#G(OCR3vX68w?P09r5~`uI$15V+b&4*vv%3LZ+PzQ6fK zO^=0@5PWBWkzWv?{a9oAC@MwUZWti(lYa&+dv*ssaI5L0gy1EycL 
z1$8O3va$b-R=TTMqyQMjM6Qpgp+Wlv&)ta`d2+?8u1uWm&GPo;*#maDPeNf}R=!Gr z{|Ob9$;SN-+75ewSc{iW=UuxAfbQ76Dp5QRhPxJz+g8Yw#A+&>*=R9N204)1y$iXA zHB=s{)jyeRLBR#35T_*R;w~_fKK4DADklRV3eYiM=(rilF4SO=)}t>#=L*%a_{vXq z)B=_bOpgpL;3UE@nT!Ix3J`>f0$nIDQOs2H@-Rz-T1!gI;(o6Cmbkx%JveY;IMk~B z6a7f(IKGXZ-U>-p1Y2)mmhp+{qB@lM!+fGHx^%7uQPPURC`TFDxZyy+E~s z=_}A0EuV5hJ}TpP*KqI-<%y{yzZFvEsdIoyBegNu6CqExK*{v3Z6FYE@rJg^9m`q&$& zLYxs^8%rZXvk6oq3n+L9Q#Q5T!Ui%#%waV8L_3>%68PHGK9ythzmempeFDqGh{KEp zoXSIm&M#0Ma(Xzhc{K{u{yMzXmwpF9P@}`w1(Tj|+e^^H*Z0bro-?^d-eK=Dx*h6z zu8>8}-HhN9mc&9JPAM3QnZh=n%Q^rP?Tg)4_Wpwiflm6bU5MP*>UDGcQ%5yB-1e$?7x3sgq~W^%;A)S1TqQo?jwJuCPu>Kwz+X)vsf2m!f|3>lBmJ>Rk7 zHGPfiw{m?Ncp(IF<9wTUMP-|2 zeF|a!GnHwC0gcD4QuWU$eSLotZt%d{Y33PEhRHK||6~U^M(MowX8+U8K}PX}glh(f zaZd!6GkCrS+@(lNJgJmB`22vd~M1rqESLo~zhz?qVP(GUa4eWZ5uT#-iDoj{@1vm&06oBE&qKfNBD?p&7 zvG+LQVU~%CS;{ug9?QS`-XyLdcl%8k9Vl?1wZthqke30~&2_-Vk#Jj4^AW{kbcpH? zx6nG-TscoI>E~bSQkz!ff6d}MIa{F5 z6+m;F{aGUZ3!bc|i9gGKXmGbY){=(TK_{8ywbRu-mN1=c>K`7PFMrN`1-(kY^Yia~ zR8mxI%FtSIGABwl_3O4}SaQpGz~ySfEp^h4{jiWjbWfI>CHOS*#glJnaue>uAxska zHS)P~NPHPF;>=BPKy<8zvXJ=;dH-0-6jdiIFSl>wr4{(|2Rr2pZg(gNW|v9g8jV z8uceHM?&6xdHuBRKryZQH~@NqQ9wiZBNL4o$28sAb$!Ujee+MCBE1HZ_BX-w3OKCL z^y5Z)Wx33G#WY@&LDzBjyRNSv5hZ|#@V*2rbc)5s8J*!$v|v z{`#ySr@q2!aI^h!GQ+4m;9R7b!O5)d^PqmukF!he z*FTLFoq#9qvDtr*_%m)v8jli?hQT@-O0d%v?YiFEc(S4I<-*d!M_{;~hH@`3c3g3I z+uZf-UOBpNlX{Mn@x!LdRV!)Bg$xcZ!|Ch;bH{3cKmJkMgG)V0tSc5v8p^--Vtmez z7o}(Ms_nd~kbgCPD=yqk`76^q>gVJRA|lakXBS8!9y}BTH||8cK8FUMA>g$0|Fi%P z!km8X-9iLWKfRVtV=dTf=d(u6?^?AJu;W)|S3;>RVB{TmTBa>i$E`Bnw>(2?T-8SK zbP{=ZIIE_=GmQIFeeBWGHYRx1j8)Mprn4T+1uQ}AoIT>zKD z-WBmLa9Lg@BlsuWXk2|on2Rc3d*C2Tt@nIq!~3sZMPgiKXg%xSywS(2X?)S{*R1c@ zkT%K?Ps6MbjN;V7zAcd5k_-)hxX3uyWC)0$HXs2|omPcmZzzfrEF`P~bV1uAXPR`^ zORf78zq_#7vxYZ%zk40~l*{zb2gqKcVPJ@{d;%vCCD$jUyQktNfB)V_h9Avutc(EB zS1kAwoi$>c`upr{wgUujf2ne0_;D;`*?fnZe{yU-mO{1NtAm z4`=Xv1qrls-uftNep+`~k3ai=ZR!8+j=t;Z<-KC*!WWE2TRz*i6bk14P}MTgzTa-v zt{Qv|MHYYR>&rUYI6l@mioHcg7>0ZG`1!?JeK%ZWa^q*D|K7&VrU~Dv%`_^KpZ_8{ 
zz<7GbH;pHwyK8@|5>QaCU&>swMn-y3ninnIGELG^7{_n5Sv4I-`Wf3_GS*@aa+q&S zueysBy%t@+d;{?+PLC-S#~CoFZRAt+!zLPX0zVck>w)#R)D1*Q%Fnt?7XYeqDw>^* zA9@!IWJcoB+4yj9a4?s6ZZT@?rZ01f()!LPmfv{t#aMpeCZo}{KHzaYEchRTf~3e|KR-C?nmb7cjtI_-fQM%`KY!T zM<#^(F9TQy);Pk^AncGzfP>jvr$WlW?gnAlF=oQJU>G>Z|av~ zTG#h_@+JB$1eLM8!;4FpBRC3^oZx|v!tg&k{l5cSmkp0f)RJHptb!rnqKbY^f;4yuCAB)+Q9S6k3zwfvciwI2 z_p71J$IrDp7e!y5Qs$l@xRD(rKoajt5kbD)ILLe;_|E%$z1!lL!W|v2KUDftkcP&! z$@MVKnQ+S3l!d27hwn^S8}AC*FYP{#)D@bx_GojGWYh1z-Jq8T^Jzz*^CoOs`#okj zj*p^A2X_R_EM-%ir`%8hCzM|@Ftxwd2{V$|9;bF5mD~Y%f%%&K^f|y24%_21)q9iQDO=x zNzNYggl_XB8YPi;GZzi7Fv6j$W&K0)m%mG36s@#AA=-H;J8;+PXzL_bP`M@AK6z~$ zy;jrY^5De_b|~}IIMw$Fx+0hwnG2sCn&O|j>@5<{QlyS=Dkf|x;!7bFxISqYt&Bf94 zV4x8~&56J%`>eZn`@VqU{n6MYMpku^%+0LoDlgqA+TWM@_3$=mX>LIStf z|3%8%d&VN>An8vWw#!Kx*8?0(N!!ntvZ-CNxrZvb>~AS^A}4lLjeqT)HY@2e7H*Gh)&B^KYBDnazA5(>+6E=)gk*QJ`+P&R3_(EyxW6*C>Xs88M zrC2?y?iGj_qPcRXw@ctHwRJ4)6|k%kPg>Cb9ywWpbi1M2)W{+e z=(O5=EXtwSF8){q^Zm%P*X1t_UlxEHXdh&!DbcBH>irpU!Hpg3FYdw*y2_Hc-l#+J38%Z_gqr$dQbj24u}BHzim$eQJ*})?K>w%{_RXP+ z1cCYsGVq>Z?jo^I(Ub-Txv<rv9b^rouByj%)VpX+$>NbI zvJ!h>m!unHP>D-Ws^u{9>0!TdZ9W(*?XJKI^YX0wow#PXbL(;+0-)zdDySXKL?c{aQGYmjxQ8op{e~8`tbCK^XxN73&zm z)u%V5I2FN#KPNRrPS4uB96}Sy0N@3_tUb)MEs1%KRdY<5h4Q;NsOdNL7=j~cN6i!D z@CtMnH^(cN(IMluVJa$neV|QAAO%P7f-i`Z57sI&XBz=(E<;0tN}<#&`$t^JmjT`z zDLkZ!72sNHF;$aJI?~x5(l3tP$lBjANze}Cl$wi5W`k&zu3U=`BGZ%IZ?l#+=~jOe zvwB)!y|{YTpeA}MNlBgeLaPNya@YU%J-KzV*)*fSo^0{IQfF}f7hWz$TTK|CkuFHV z7wzxuY@>%+1)H8azP#60f^@DRii}W&V?+ z&J>09bl&4eln&g!w-d>j?6k3TTlqh8yCqCc$#1$xRxfqUt$lIR{Ty5|Vd^GPA{8># zM=6$2L$E1##X|>Wb-86i=)g}BN+>##4PHaTcJ>P-a0~2sJ?P$YfgmUT1a48+ zK+(7DHO2j+(luADc-LI82@_H|vlPFC6lOtOd|0?xVO;t5?Q){5Z`~fe>Zd+P>f0OY zB*k;~DYm1GJ+GBg?Ipn4o?ud2{}VWhC>%ZRtSdyY$lZjCI^S}sl*c_Y@RqDG^Q^yl z!o9^ux_BEi^nm-)v|%i8POgw(w{@l)Z?-<`>@uwq%oib95d#r$D2h27z(;mgeK zZbW4swc2%N$;IgAm^A-3Afpi-Asx0c#KzMvPOpSHS~luXh?bLaj$@5#~#$CfFgFlaNp)rqQwAwO!@%*RG*|a!im1K(q!Jo zZ#{{I5+1E4r5cn|wBd+7fi?#t-+CI|*ckis9CgO`Rf1$-f&ACMRz>*9yZPlfW5V!* 
z9I9@M2YKXq)6o;JGT|TD&w-_4|N52kiMU0CGmAv@8IeHR(D;UL82S#!4I2|Ul1vvK zQZsca%dC|nTkenPa41L`(KTY8K;x>g*RWq(+uy0YBy!~4cq2xbNIB_Jw-HzoHu=SjMJqn`HEoT7|*Xyb>TcGRQ$7PKA_m^w_Ue zHfBxaiJQG5!g~YU<@FiFOPqBhD4?>KI8Zl+ugnozxf5;j(@!Hq>z&y?=@4g4&8_aG z3D7QREQD)k1`~w)6AQx7 zs?NsmlyL0ZF8|*_PnDni_Kweraj6zp240|3Kf@Uqh+d_hgYjbVNPX!I5Ghx=<%u4N?2yD3+QO{!CfCURpZ%Nf&rx6ev8hlkI4N zEoe%>aNWeg?`xmq?wi}TknP;?s*+dRchn^n#(*5uTJ z8B^%ZtvAa1H(Qf|B?qo%wANW48fjhd3>GJ+E3z?{07PK9#WktD?^v$+swK27j`SA{ zU97Kb>!XIXsnSHJ1ANojJ*bD?Be|kJO|{;BV%yDyugM1t{(g|c3o%8`op0gIvt0qt z)pG{I8Gm#UKi{XNJdACBLZ6<*KIP5!m>ne|WdI?e_l3^arGo1FX}lUlw>%7AW^XY5^TC;#1yQHhP@rlE!^D?z#J)Z5F7~leD zj$2bpu9*P%7F^xhtMl(Fc#`m|JaRZTAH}Pc=u4chG-;{7py%lZGIgt(@%*#5|9ZPl58_G)7KYI;)6E>3*FTN5Vo)_WMkk zzm-S?tjnDK_^zr8LSP9N)|1?cJYx$D;W+h7b4Ffj>aK^?gHTN$xQn}4LsT@3%~xj> z6O(qq)0RjL{z0mdiRK6%5}~d{x@lwM>-u5%JDhHOpRzaKG_Jo9=x(FPe@@*2N}2=R zTbj*@P_h4qum6C@`VHTQ@tcw;$(|Xv?46OyirdHvAu~zI9vRsxdneg4?(D7XWUt63 zn-D@~+3PuPKHvZE`+uI-^LXVmyu|x`jq^Ot<2cUi8e-?TFAJC%p3-vU17#mSrPe=x z_XL#S0c?|Ob+W0yr^rH(fkvuKmUb8yjz_ zR5+yjTutTT7m*jgPGp_mV?aA66eH14jGVU`y?U0+8v(tlAFv=utVf#2l~DG<-;i>U z-!YYkSFMg;l`IY>ppMN{_rYQh7QRzskh^}TTTo+6t$fNpd9C|&3{h|Tm;8WdlZ|Qk z*+|PGb)#nR2?TiUQXzWQrTce3E8*D75r$EY6a)tZi2izUOWfh$nuy!BA$U)dO4}t-P*i6n#78Lsi{OLQ!OI7wy5FGg%jjdA zD9CpB8O}QLCeW0Jcp*#@bw>nV3;^vL0f1V9hqtadPPrT4_fQ7$o?A~;MASWv`k2v& zHQVb9c`lr0;PjXvbR}d{Z$SeNOiH?xbe4qnv$RA>@BFx#7wvgvK>Zt}fc3CQ%=;*m zVgrvO`y>{$*5E&TgI@9ygjcRSF&NQZ+@9-*DgHQrKtzc-5U5!x0E92*VMxRGb zo?42zbvj>vK@>lGJD|-X=k6vj+weRRZ7>Y&A_U@)|&zx>&(^b3Sai~00Rg;)B zDHd;=U?Uu^2^hha9uxGH1TI#g^vU;1(%0z#{os2@KDCQK`SRvsy^;^$fqG@`e!6pz z3!g>}knee1fPYTIC}XJ1(_?aFu;Iuolwby|K__d6FrBFOavA+br#t7)IwkDKosBML z4sw^8A$cK+p79X|IW;%PJ8zJI2nN6J9Kti4XslhBD+*f?m`p%W5-W8f7u!QDKf_20 zCg9-5aSY-=E$j?gZ-e(?BW@s$&dx6IeU!{S;W@Oy^CO=Z3t`Fjg+94{K-&Et0ccJ!G_^KK-0LwY`| z>#+U8P*+(#ss?)}=&Gh{`#fth?xJ6lErTkLrrXGHgmKXhz25;`5DETl$oSN|xy8OR zD2$M3(~la=h*zY;WGi?A_@xoFWooYm_yz7*qiIae#cg^+Fx(&!P(=H_Zs zrg$X_?fusQ0U7cdab{NG(vXUd*U!$(v=2`Ih#Q#GyK+Ua>iw+)0vomLxsVN7Os%*| 
zL9yA?{U3*oC@TDO6+jB4f+nEn5wH4jZ)3nE zaU&W%`=>T-Y_0n56)y5iH)33C7T?(^UQkJPkK6}YgR&z z3vvQ|;YxzuzLMkc_^fAH5WWTGr|U`4MMrc-Hah<--(8*xOFgupSvP-<>tuPtKHr^8 zh!8n0r&!f7sO@&PoDKW2-9Kf z1%AvDCN-IP5@q_eXJyf<3K6VzU?YIu~t+Wk)@up9svxRKzMwhfk=3zKtVtWUN_ zJa(H5lBHC(aX0!ptT4hy->1lYUi1n*>w3hUOT8|uT!~j$iAf6ojwRh-B!V}kq@#P& zF(<3uhZHeTK}f>C^}o83#Z2Pqr!w5lMoGQFX8)F*>KU*0zoiZ-F^Z#yuibJ55s2kG z6dT3kdm=Vkz7F)$O~#F!(en*OXk5Qe#U3xm8V7f5$qr6$=b15^8A7k%MplU@{zi(j zb*-&^`bz&#WjOItVWlv|TFrDzCw&zbdpS$$j?kTVf=8;7cnFb`5FBd}+t!=WMrNKD z_PfTIL`T=va>#(-Pf-s)_DYL6{z&3NGZnr{VTl!UUvB4;Fh=EVs+)V!IOibiO3BZt9MdlB0~5#3^2;c$IXH4- zaz}q{NX5+A`QnR8TKH2w=YH|TeUAAuq1z~p{mf5GEaCZCzCaHiqt(5N{(r?r@O{6g>J$ZOg@z`CwnB$E4JEKUQ(vQEir`V{i6GaGwhAxc?7Qu0cl@^UF{ce|2zhG<}OFPjb!pYnygb7;|o4y3J_v?E#{F58obr*YFVk|MN>s(Exe? zj=woBNbOAr_M&q=3zG;Fr6&E%)cT&duSJ-@&)egIf~Ym#P23wQk|FFHjn3;9QO@hj zG!gMREx6)uL+^+u22+FyokPSY9f+-R?Xrn$RbYP$exSP1(`b5daF3nVzGzWr{N)ry z@xLEC(-|~2^)WT;WTHFO`~#T0(U5mq(?Ec^Yp)Ny3yb2qp)ZQ27-W}UpO`U}l3GZB zX!8Gf0klOdle8oOS>hP9o*^?fw(h`hm|Bp5Se8Ep83n(%kU_z*%!MZsr zkfZr>7^XNgk%E|NQ@wZFv&@23d}rj~vHSK_aSOI)Lb}|Bx;iqJ)~+S_vlrTB49$RyE)CfH*}stb$Pk9 z2c9foUKE~|X^?fgq^i|`m;9W-49%ZeF^u~KpVz+=r<2l%%D?m4gIaGeAF5G~@?lB- zr^nfj=-DK!KW>ik7T<0rY2BC!oV^<(F|mF<$=dnFs4>__+xT^UnOcQ6c7>|^TEcyN zqoW8EtsdcAFND-QEB#Ijn1sq=yglt%}B0*iQCAUHx0S ziT`WCi*$&Y1q?$L!=(qUd|wBzZ5BfeErLYvFxJoV33tRV9i%+Im;ZppZ|AVYY~$;{ zqQKM^z=AI5eNg2ftx0@_kn0r$1dSj)16_Aok&DG35CB$9p(qCI4xHX{bYX6Pw(EScjnRB0@B(f&7YX~ z%=UvkwRltQ^oAeOTjuRvi9cSD>~XaIveKwsIm08mw|vr+VIvk$Uw_wqbhcW{u=xr+oMVtr2@cbTDKWgpQxxOZ@Mv8s-Uq> z0Xiq7(ens^Nk~!R_?64eF-DeQz~D%cMzJd9zegcDo<{gTtA+P_Humli`*y9lLe%JO z%;M?&4s&fgJg!C`t20$ZjTrWWqp#=FVjfvb-|rqC9K3ydG$Os0fWGqcRoxW=ZTs%$ zy(2EdzxNN?uiT9wrnl8WD==z?Q+2>bN4x@;L+!x_t())7F&EYZJi$ACHVezibf44; z2~2S>S)#LaLMad$eOh$>B&};*X;gszg^Mdj6HuG*E`rFg!R7<_xoU!lEfH?3hS2m#vYL8?Rq{b{FLH|x)%SLsCBybi$q9#g! 
zKDGcdvvcb=JOk2xV0S)o7`p0b@xIN^Yj348&iwZoiVE0cn52<*fB|7b7zfKg`nGz) zavQjvws1zkuiJR+$J%VLArAqDb-|0%_j~_~l&yzw<(fIr&|IC}lI@{GM#DH+136}) zW>w+gEC4ei=%kdUPl@#Z)P!p>9RxA`RNF^zH3zK0&p|c`=@8EaEz8d#U>&pNDg>?n zmKD(X)uunY$6S%o_c};@ClBv$6TY3&ZdY!WVAEp|DtgDCF8;cx@PE z_g4{1T7g=9w35-)tS$0xG8hy4gi~)4#q*E_+qC_(fj)U3eJG54WMjaN@a+2vu!5LX zC)Rb!PyDVS$kNG>8rrx7B&R%e@2tl;e=^PgkiR=+{ov+Nr43!z7V^r?*U*M3q6heMSHv ziWKWVk}Aowj+fH-Q`^Uki7*>Ze*93$*7J4tI502zHy`~(l^Aibbwh|)*ULC19WYL$ zHf_&y-=BY-I{fVL?=iSmfJMXc7=Oz1E|3qCF+%V2#LP;UPratGt{_6JH+ii#SQLI} zB&;>`HXK}&*7{+!du*&{H>cp$V^!gHkgCC%*7JwTm=&r$5FBiWGbp0tR%anjm^vBJ zut09t1ogsnP&$I{LT8brl@mHD%lXD9F;(f(YmNtGeMd|-{iZ@uVJWRRr$a7CmpS;r zqow>5qk|0qHUhqaCN^xITE5`M`Dh*=viA$SIK~kDU?xzoub|Ry!9?)t*{3}=*#a2P zm|)G1GK${OZ6xBct?kw2_8$rz7jSVmcDv2j;gh9W42*{$y#wznzd}f&8T3{onuzST z)L;R|0e}oPI!yo-n1|Jbpl;4_ElYc0?_9{YKkUDre`+r-9~ECJJ4?NnmpL79(2pRf&oXd5HOB)zlhNj-%;u<3%gpCZfUBq(8 zM&i`|lS=jcw>+Ohs~JpIe`6pcgj~1q!f2zF%~}N_?URc*T$Gh+j?f!n%eizs{WcW9bB8j5>sgvfq&Oj(S&j zXh1;%k<29E`shl2876=IAm1?tt3%0Bs#^C$jVLOMD^;C0YB|K+a#w~wDF((X$SHMW zWF_Uq{-vb8=Dgxj(LgKUWiYq`%BaLF2_X&*P{#?V(;#A@*DJ=|Z~_aZN zW*Jv`Mp)l!T2a?=L_nQ@*qZ*crESbJ90nZFkdoFd4h7@CggoduEsN6xjNn_^3qnL` z(}jFPNJ_XEv`X*Xin)~IzqlPhZ?w{o75=<&gO$@t}#18;aTMFL=( zaZjI|`vXG$1f%KvZpPe2)_EHMRUgi}DrAJ6FKlZx!~eq%H(c$D3 z355UKN)|F;-1CqQp%?pL+)+uM3`>SKMC-(BrDw84dbA!=`+8ELh&bw%d38G<{Pdn2 ze_5*`X9q?^3VML7<+(gO+;HNjy6e|=7~ zGFV(pWT=q%U%+^-Rxg3X7sKRN(;tgvZ7G`mW&6RxRu@@5e_uma+dKxDlmpP2kxl6Y zuOJi8X65ejg;;@lPQUjPqX#_>B&6yVZaTQ%+~MNCbL!Uau`rfOK+-vc0s!`lI)2 zQpP+sVhS6cM&Dbb>D9HcDZK{Jm7SKX-7c_QQ1T~2C99hWv!F=qz!U*CFjzkkn7fL4 z6PtTDh-8?+gRWV?#nx@)NQLGzeg<(RJ_`00VEZ0bs~BBo0J(P)giC`2MoWxhxkwI% zNi@YSHy;hFMqM}fwexjX`~h&xeHC(AQ(zNbx=A;M&vV8BWr!N4sy%04}3nG(E2N+UV>cpF?RZO6`@e z9*hn0RiDMGo=$qcM_vCX3yCx*`I+6TgU_kU#?p;JGvK#wj?q727#*EWo9&DN4@C3q zBA*V=o>go4rsDO0yLz)fhy@15C)zP;zN$E0!(DW|H_= z7#VliI$2#W3Rp$?9KG-VnqTIdIDcOlJ=NB5Ei11#kfO=nZJ4?bkN&-gj{mNN>+nRg zT;@W)>qTxio>W+k`9oo~++W@smywc$=Vm*cw|uR6%FR22baOs(=3A)S-8&SS_qn1MqUWCIn<5zxcmL^l0z>*;) 
z&8KGcxTQ;VA%Z(dDX$ln6jWUqCtnE$3bDAsk z#%ABrWbg;OT#;c5#J==U@8cO3+Mmc&rjY`B8{i>z-*|22E_&`Q4}>V$sX-Pqm~ZTK zCm>itkr;I2vTm=ltX9M=kCey-YBV3*R|qvy*hNo(e<)!5G2SFlb|N*yg8F*rF4}K- zn_3Wh@R$A&_?vl66pwrzt3kjzQbw4AlwT7b-9dN@yp6c<`=CVif0o34h-$t*wf?Xp zNfGf1EOqKV9Mm{&$@_G*V1>$79-B94FT$PvZ-UvXZdqCeN<}V!zeIx>OQNZ=njEsC~<3mf6-;!T+!Xe5PnetDnd#4kP_DS8Bk>HWsO+Zn*ffUcZe6u@b^ z&U{sRYBi1=_N#{{!Sjl&DlXfXvKb4PSBrFIXL7%LWa+rDt(Ein^<_riC*$qKJ zcR7ako=B{{&Y1@ga~NhwRKv+}-|rie@ZQz`xstP`?D1XEnlF29&NbOo%`)`#rH7yW z9N~Xl25SH0jUoATz2?K{fJ+DKXV49mgiC>!J9;Q(wW6_{YZ&oaJ&zKg)`K4@2eAgC z0^ez%@cYJfN$(&QELkkMv`|r@!F-=uVVKjx82XJ1qj+LT^PYNmX48e$nTC4J{iG-R zvc`zx6Z1?fueWgJ9I5^zt`|7f635aNID1tzIVm_3Kjtpi)2qMo0 z+_Y=Yu9GLxZAG(sTlab0T&=)r>srV``dHO7`Z_|o{QAAW3nu0PD8{*xs#JYC8ZY~g zamI_k??y9{Jg>8-SIgqZR7w2lXE*}hqD`MMtfSSKMA^6^gVi;-*d&PK=T7pIaW%kn2K&D2(zVjH}ZZH z>QO#>8cYSF$f021xsnEoLJI6A*qE@J0Jh3=ThJYDgd&Dt1fsp2y|7}}-P&(z$f6>j zv(;R#BRM2ZW{)(}jLoE9KM=*o-MGcNJv1^Puw?&s0n#=G2%3xUFW?orE$7GW)*XC) zEV^zbxi}p4E6u=Cf-re*~OPU0wDHy3U|MCiqsAMkHw#68m__DiR zZ(z4P;$zue417QdEdSk!p1$u-Sa0pc+9w^cJ|mkWos z2!y&Ohvj4y-pT&ZqFOk&{xN@^ZYi~dvn6MNe&rjvGBLk$-uU%H`uK(oLD(-Q?@e3J zOXm0xT|qa8WRcIhVX6DzLxKRI`Ih;uUVWoaZEs_Q0;*c%!+qGQAap#|?&ZZXazDp{ zUP@5NmpT*`I5RJJf8&so$)d(%Rb7XB+~PHxsnlv-3HN15gg zL;(DH$oz`06S`RLtICZ_e6spzQ_)raMpwGjqdoN+W>q2MlQ7+P^wmewr`>l$imDtD z?`S8>{?vdmzk>0>rI&^$AuIMQoxk*DBOByi*#0Sg@W3@sA-{ohtMG0FZFt4O45kZ2 zHCq$$>iFe_h!RtNzX4198%f$?!i9=TQil!Bi?NjK!$KF0Qql4sz z%ArR%2+^&7ymo8PSK;bV2#5_DIfgzK66gYDoVL>kESOf-aO+O+3nbSB5V{|;&`#RD8uAS(642)+0y zD0r&z5ZWcCL^Crp>w~y-4K~{n{&G2G7?Li%)nxqLG)2QL@SRlb%_s2NOjg^9VxSH( z{$myQAvt01)4&|Q$ov|lRAQWkx|&APpNIS=zixn^D9Zx6ggL;*!SM4~004xhCBo(> z3jqGAmh7l|6k|OZ1j>sWr`t`C|E0SKJq>3K_r#A!$zg8}?`$MI-;uG^K+ypqTssET z;%wP$JKF$yIhNg2hEO=WZ@;vw z1HNJ?lmIq|re@bil$n(||20wg2dPs)m_EejKnq1^d)+pA|1Af4MU>MUZQP;q$J*h= zzRBb*5(zuBca#pk+w;q5^EF$dbJ8xpr zJEzp#VeUoc>*Ht*<~sh@2vE_Zw;N}JfPe5wC+HVANPg!jKC+J-J%$S@fb;#Z(50|b zxcBdQ5!#fD?pTHSg1;yQ<37*U%+UQo#pT2Qn__BPIWDiDdp@(HKbeEwj0i-!AjP$R 
zIJ9!$9P>GkbKt%A^Z)3D(2pw7<$oq(k_Fov)%frjFl_2D;1)_;Z%R-Dk$%(pgAwAGFHar}t}irWR8^^3OMaehpLwtJ%-a;@ z&#TtkVt8lj>8B_^6{BaPfZtO7vY8Y#4!qC#cGUR_9RuWCiCmwH&AB97MWCgD(#P8s3FRSs9`fO$R_)J{ z4O-}O#zKIG52_hd#|&!e7ps(ml)>lJHGBB(O0N?Bp5Y*PwyZn$t{(5TD^k*J`#dhV zV(6xJ}uM-R?S8tg<+LWN$N#?u)#g$CLz|te1;J~+owxTkGmC{O`s*v#H$Mae)d3n${EPHRRR~%Up2EL50!dN8-2#+P1+Rof&vTDiAvR+#?KWMYo6N)uE4~t&t!{Oj;ynQcQr%IZ{*Fw zfBnq=TUS4wA0$2*n2&%dR_A1at|LqzCJ>BZal!RUak) zdV`JwUc2u2Xz{Zq01USS zg;Ur_fqcoWQPnxT{Qz>g&&>ZwtY`Lygj`1@fog-Eto5=t95q}=f5uukNVz8}UUV9cgwLIkF2E z3J?hBQ2l?EZy`lMJ=ehUP~xJoeKl!G!1*Z#4EC^EA>q7je##({z6;{$l^6AbWPxdg z#PB3w^}DJJ-vu}w1Kgnz1+of6P6|OvN4JbiP}}v{UFFiAbxREJz^eO?1YMgd7Vf9>Jqv=c{E&X&KR(lF zqLs+Lem`KJSIfVH=#(YuGHwOf3nNYejR1Uw|Awh(h}`V+yxYN9*FT?WRzodCwzAvT z6oEs7#j!u}aOn4*R`!~E(2!1ou;7x=aq~-_#~nP(Kyo7(G--)7jZdwoW3#doHlNzk z`MTPkojwTZqiW}Cw>fh+fs{N~7DuFsX%N6T0SVaxhYX-kbn)B=tf5}6za{$ZY|(f@ zUjO3-7??pFWyjId$*_m=IDVhQSc>$1?p!2NHURSoh#MH&GNv_0C|3xT8$otksuv#$ z6s&v`O6U=Sm*vsCLUf{}<_!45e!5^$miO-5aEaE%e7_@;U0~oJaNcv}lP7YmJ(tQU<+X zFU6UiPE`LCT@{z*Rk1=i40Oa^}5sNiP=(~ zXiuBvpL3QXVA3NhO^OR2XL#*v=)Qmr947`f7vFkRH3c^50)8wE!YSjvXFetHsrkNg zx->p2fLsB{1T)(Dqy(EfIUhU3_Yq-*2qZT5j1a^H@aPA4)huPBn4}c*SL`s09N11M z?!e#b57?-{li{}oixk}9v2xd2^K4#SEgJ>J&`&}tiYyj?N90&QirMY7f?i%V7U;o3 z@O@+?hCm4u4~FJ6hvYL`DAo`!Tld`|ehguLm$O^R$8-l(jOiAJ&S&-C9NN1Iw!`|f zS}CcO%H~+DDHA@v=>(@Y`>1h2yVB>c_ek5pAe=9zf*%b%EtXlE=dWUyHc)+oE+v*+ zee=&xZ@ASMKei0RkOsQB*#6|e>!}uZg;vR)99VE%+ijd8pJqJ~BZMWQHbl7_p3Rh7 zy<4`6u{Hwx(r3RxpmIjx`C~{=vqp?i-?54+DQmrll^o<~4ZSuP_}sxW7u%gIb-%g{ zcrn=j=|h{W2a$+j*?c{4?GucN4F&@}`e*z%?cWMA?o^Z>_s0I&1x1~*Gg7Od#xIIb zm1&`G1Vd>i%T=KrQr?pn3_7;6-1y0%@xxBv0-F)V9lrDbG{B35`jquMk;+< z0WG6a^ftJp+amu9%&|HS5s*OOp`U%&oh`~3kdtQDc>8q#pN}&SQ%sbVqEx3WD`HoZ zg$OGd4spQ<6Jw<=DQ)Zd*9El>IHwc@W-8)-Z#OS@|HZhYLG{q8(KJToBGt0xpyBu~ zN%16XarKlGXxM1c>s1o`&d?%0k#hN`)s6!sSe1Mv;O96~p0*c*_xaJ~EU3RdI$zhM(MBV8eTVh*t$<*bF~=`W9mwKN>1R z(_4wZCMMFSPz7*`Bc}bWP;|H5tYa%y7#Qq>YOx@s(p@!Zyb^_Lc4{45lkGW 
zDToweIY5DS`;(1JpUcRUvvTW-1}XMlVEhlFtxauO&rq)xXa_*;y+(uXyoS??5r%;1 zr^JL_3pm5Vp#3EIt6R8DPR$AKF*^0VV5wAsuxXQGltL{zhrN1FZ+=QYKiT(}p19b? za4T;*Uv)j3^=-Cu6F-b@mJfDR5A?3_;3QVD$E@P2C^$Y)Ji2h{hcdNbIk_Uw`$wmz z5V7v0b(VUF(T9tVvaqtUCMPE9ix*Z;(Lv|$^%B#q{+dP(93I&j|HK*Lc8Ml#?$t}$ z={Sf<*Zsz{{ho(DGCxEke3dyZ`>f2TjAYF0=(TLrj25dbDMQSp3u=&WghJg~K9(`| z+c3vEUp=szP1>-lwN?;i=PNbcoia&l3z&WKiFPL-VCG&m&%E8+cdxBi?;!%rn#DG% zc{vo@3tJC_>NCiM1y{S?Hf(j|d`icUjtWl9vQnLCUZM-%F?ju^dvk|>bB>eZ7G-os z>?Kn7hBJ1Ku_$Xa`nP8bwW{YL$Ii0g9Pl!16SC$)jpwQ!kq9((wU z0tL%QX8!egb@Ne9-?>)3>tp?%X^yBfJlT5rI<7g4XmCmP=FWBMm(L$}lIRpfYtXp% zRIr|l3jV-H*{`ZQzF_aB7<{l^NGM=;a=$foF?$xiu7CHvh=ve5jPU4J0RN(B}&J!XqZ!FFWCp9KP5E3-i;f z38RNti()EP-s<>YEOm_<<5@o;D(DC;%Chts<1jyOS!VAVNt*3f)v>pMqkc+=^@~+D z@O|+1rhhNMt4X_xv3l9ee^suk{F*h@(>O#+DuDA$H*52Kh0_YxpYdT@Tl7hMj!sd+ z2RcoyYVYehOG=c4Iz4I&=JnUH`GteeAs&~Dq}eJl^gBGT-AMdf&lWJXyd%zY)_a`7l&H?XCq(k1;GjB46~ z$A%m1l4|V}M;_*ipk?~~R&;b4KhK)uL3b6Ezb&m?zl3eV2mT4+u)fAdl^ z(Kg2`_TP`#Qa|E)CL}_gnZ+fTM>FZ&K^q&Iy?sMud+*K_>MaAdSOFL9Apu>E{{M-p zsgI3<@8{2^bkb0&k@Q!&e5IRuK|19p+7lfXUFJ|bof6l4Q=^N17hk#cp?riz?1;tSE3kN)mjp589`NOiBFu?SBq<=0}ic_w7PL+(H z1`PVZ!!I0jW??*M1dFWM7*+iRvcWw2yQSZ9Jt0>+Daz`90Na0i(|%k3WluHZbARjPWZ!WYdc| z$)Vn4kmQ5$8G9%s^YthqD~V+Bwv0vAn;R={b9{YtZ-$D-XD=~oGHw=BXsBl3Ai`~_ zLoIVe(+65qYIxfkJ!HQ+4!6qMK{s5}sm23DN|5;HFd3I-)Z#gXUI}qcU;Jd)X@`E^ zC>j>VcQ%x;MJ^pQ+S;ij0&r__fP~G2-^MIl7bN%OdyuD%>~Cuj6n8 znmWA0zPxu%{go;m-PJ;(KtJgc#vJ`Ws1>b;yEQK&&aK?Z>pRG6m$~wLKIwgu-dAtE zNo?OE-@X}fvmk zX|+oSpcRH}vj)DM+(uIGvfseuHnq!b9Uhr4sPL_3e;do9`i>9L_qi#H)Mm9yCLRk5tMoA!6g`Dab3yrSZF56PrMUy-J#`B{F*oaX`2{5hh`(F5JM zud;j;nuLAur_UpKSj)z;)}$&cOe*0`ccf;%wizyhlxN+8Y3_(y zWkXqcV*Jr!qtynS6;S4|*tnLF8K!7Nxsh7wuCv$l+4|AiTb80g*-J#WBvS#xv1ICG zti9&1ND{0!Rg>JYttwinT+->0XlI(PjY%>GC$!y;NVbqt3b{Z;$rr{GS;Z_((WxK> z{!M(I%~t_06`6WpsF#S69vJg&Itq$8(uRH!NaMY=JC-a{kx^1XSW1xS6T*_&A9z4t zjeG5y95#=@iN&S$t~$D==zEh-=|b5KvC@BARZ z5?D=CKi?8*KE+l}A@I+tQmte=w8*KyBho0Qa5+l#ZEB_SQQlvLw+ae0b;nt8J~T4E 
z={p`Zf9C~}F`PV|+iFpK$NP|VyeP8zh2kfomC!3C=?5BNfy_j-CJ2^FuD-`o~*3U{GGM8 z`#T+<2joBAvv#5icZAi=BmF<4WzIjpy-f}ILCz|`Chh8=4#G#Y?3=XraaiRhhwk({ z+($_MFKASbgWv$4adoj47*Zi_(FQ7DM`&0E`ejsE=eZy`(aQt#gmsTYtZN==BvUOt zqw-@!dMN}S6q)&UE{HtT9rvfmbrX#iqU=AUBV0ZMYJ#iZUe%|)681uTP!qr4dZ#Bk z>FvUKB=t21`}=6Smlps`QJFXJoiB3v&EnK}=P@Q>kr7?d6tCV9!Nz;(lPUIn!2lI@y}B8UW`Th$;Vrs*=&>He@!{r`e)XKrN6JXlAPGh zG;p#Q*AY)1jA+nzKPApDuFf!L(e-q>I2vcJR3$%aZ|J&q$HrRp_s+k&{$HvT(+SRV8cs--M*wtLBR)S}Arr6|pvMv#WMl`hS{U=lr5w+rIWA zAn8s3n+wzua*BGt!@cHOJ%|djCUR(B)aj^{48l@-@>k)G3Hi>w);UOsHIt%^JA3V~ z+nc-Ic1v$ER5^Pd=zYc+>RytCN1dwchr_%QA>A$R-57Q%ElL0QB7)??18P`NM;9J6 zyU}=$ki8|sqC?PicWge+9Uh7A-PgVZ4(YPg| zFSRnr!B&RovBKR2hKxmgrmC?2rdM=){Po5H$E%I$Yr#iqhxS)v^w;o5ZyAMWRmD_< zb^Yk(JJ!eH>YO%M=ye+6pB%l_yH>_ZQ)YxI*^oh>RU$2j%gX8pJ*5vX@TL7=ojJEA z88bLf_)}8PRiW%P61{6-rYN^qa*VQ(9Y!5oa8Zna-HX}EHZte4naA-HmTC4JCqCJ% zJx2rRhw#+O&F7H9eQmV*b!ahX{NHiIflE>Udp`i(T)FMrW0Utpp9>B;e#|bQfoxmk8jQbSrf-C^K|7`~L|}FN zN-lvHq5V~cSu7@qe(&!KD{o@RLe76IE-OZi$MPGm*Bx1?-SD`8c)U-RX{k?Am746*8 zxhM$=lDG>Gbn5~Q_R52p%+8UmmPZ7dK3x<{zngILZI6{{&hqxHc_A2->L=)bdh6Cx zS5>~;gwpAp)nebxsnYnn1J;gSyZU1(O)l~E74H28C9x!AmnUHt}L9YG3pzAA5@414Qr$anVY21^uCBI%zin@CF(@ zFVJOo7wxCWc-opV%!Zhw=+gvVyAPWSbGF#}g0AeO`1<^_3beQXVNKe2P|%mrHx!6@ zH4_e5)$ML5R3E7bAypXUkLFF@HSNSOt_C{h%@@Fgd~Bl_MyO0xH6jX9P~ zEU}THz`a&8yCk{dGMYjrY908kFL;8ZS4nFzKjH}EVW zgb%8nRQuw_H}4eZR#5?lEikwPKY$j4u;dPIH%?N>?3%L;i6vTvQ~bv-DiD8Q+3B!X z8+rU!;Y)6QfX?f7nVP9K;@8oSqZq1L8Bp0C;4KccN%emag;F^Vb(|QFR8J?_AlNQX zI#$(PfKf0N@}_GQg|GLRUr7JkKcT}d>{pVpleYk4DVBN;#N{^2hoP247@Z=AzT zXsIhiKk>Z)v8Crw^%lH;XgEQ#UL0+HsL6pROZ#``nsx;JLs{234wnu_H9Y0R_Yal+ zxzH&#{e4?8xfvALXD^qs@Nk2cgQNW_LF?=2a#g#b7YIo&F9~wquT+KARnoUlDi@Ml zHrL!qru5xOJoi6iZoP3%(PaLvTI(qB4#qF=o?E)DuU@@mdY4b<%-dr^hp9lk`KOE= z+ryn*uLf<|_L@K6ehbs1sAy;`gFYW`phmp0OYz=JjECtBM`OPHb!?3%2^q^72wNO` zDXWhYe=aXOIr|gI43;`;JfMct-Y^>~;GNH|s{Z@0F)_J@Uu90KL*(5AiC0iJW>W0(4bEIHuzQlWTV7S-^f z(QG@TXVu=lyZ3%KJmZH@D=g5e=PPLNGe?N}1~2UaP%&A0MqcFC>f 
z!w^%T5cPY;1n29~Cak$we!3r(fMJ4Wv@v7beg_c8R+h@uEUCK;oA=C#-kCwMKMI`* zj*^x!)%bS-1ZeSo%u3K1C-naI*H~$7c0l!;v`HyJ9J|gUiq5m3aFyysb3V%M+QR!` zcNV2veH8sF7ECn00fBEErnf;1&fs#_bPy&6L+ zz1F;OZQdynZ~bKh1Jna=uu8|HwhAa2UMdgf`IK|L!<9cQJ>px+_tcXK%pnMzJXZZ> zmD(iCstDJMLR@99Mq+ErdJJXZnF9`{V+?qTT%`#L^A(ztt|}CgxdrKS{T2(=mVtVY zxyWnIj}K#;@BFDu^QJnRivxr;c;b-qUG^3t7Qj*!4c-V9XSsyLt2v{zJ_INvy`yVaOaq&(e@34Fn!7TPo`1v)sqX{0mGr+!Sou zGZWT4Bk##~rt>o;XkTmtH5S?{ithsNosEKNM$SfW zjS8Y*J>Z_rwcfj*N647fw+MPiWgjuKQ32O$WLn(BTq1~}eX$VRY<|zs%&Ncn^)ck8 z!A%+S*);a{sGQ#Ot~^)sliz6$EK7t@30$~CZ78Q3e-Ju9wk9~+y9K^PdzKHuxb#^K zz7+cB8~M+9KKe`Iez$Y!mL^2=~w0lU;R>m1xvfl;O||v`(CldUBRV z%K{PcSw&1>k<_+1%p1Q{)eAZZfML_|NGM)ZYPqKx7_t8^>c09f%B}5wz@S@NKtMve zK~h3gLOP^jq+7aCx<$Grq`Rd<80nIb4(aZO?;btpJ?D8p&-({_=Lghrn3?voS;i`FNP%{cl+829YEAMwnnJdplsO-sW4jTs>r8Z@v{XpfX|l==*&p9Zfkb; z!j@IH{`LY~$z{jzsx7oVO<>z(U=^GSdAwt9IvXq+ibp=@kuL1`;3KcuT6{x zf#I+%Cub9)~Ufu*#ZuJf3h>H1Ay3T8q;s=9$@3JY~-im)_4xj;9R!qA*8u zO}1&qwm@QK^ZYJW*agcvAKKkw$vfyp2a{XDpK674GIZY1@~Va1+#pPs>ZKl0f8c=Z z*^J(=tg2cRsA28AS>fIPj>Rk0nm9czINfZCd~}MX*U^aeGLAXv5MRq} z+g-5ElUen4UN|~d$B(qB>Cq%F*joVrYnrZh4)Jq6o!Nfxs9CUlQTag1P=cyO9)PMz z64OJZ&q;2$_bU+Vz7|V4gAozO>_$F7t`R2)kK-9A#;J7`5^3n_Ipg=^~j9Zj7P<853?%>@SHa8h}`N zq!0Y7fL6!WU>SR(t0*$JyG_X}s}Qbi+oRb}^HVWiqLLZa={7{oFm2%>-g1OCfU+s1 zna#Q(P^Sh7AtugOoG1hZKX+X^;$&Y)cJc{cPFwGvucj@XYNo+Zrcinr8c85F*J`U! zufwE?`Vy2GSpD8NRti{*F4J?>*n%G$$Kx}BMc~B?)D(*%Y3`w`y3QoJjVqf&O(?%D z4o|xD6z2u|61q9A^il%@aV3O@WHaMT@*YppbMFvGwB7p(M9RN<_NA8AGDY*GXRW?3 zgC6-HF93<{5V;oQmFLqO#lMxl1&ui=0m#eNFq0ZGyd~NX!hjfiayFtoPG&nu7jv(! 
z8SwcaFGqEL0Xjgl>sqed1e|1x32DDPu`spOl_*JTH+@R?L0E#TF0}HM5qlp0^cY!C zhN5w{)*zi$o`yDUD#*ls*ABZdLFKG&r3QS*(Jjx>q8zrkTa0Y8Wu&tu9@>?F zSB+V21JV`OhS=rv<i*I& zO#iTxgsY(vBj$8AVt+~7nuDdW5;;KlhZ~dEnz#@unzqLsG}P>8UTb&O8^9Ue4{4)N z{FM5Wy%Z2Mu$t&nZap{aYxmmF*o}JQP3FMydLVg6cVMl6O>`pL5+t%CUny&X6-W)@ zY*eA2lg2}0iTs2mR6c@m)5d@H79kE`beyWZ2b-E3IA^wM_BfWX4?s$dV%YH2c&+^6 zO&!aWO3*6c#Js<3YU$F3a;-%LHx!V$&gss+rw5$*^x?$YH0(&^oqGZVE&+8^lqGz> zJjkfrIZ1X{sot@#7sVGRQOVxDpU?l%?Av26j@L&4a8~{j8KWmF(OJ$rWD{>x+u+*y z2$LnKo1?3@Ug)F39Q;NC#nY3~@LoagC**sEleKD;IDmV+x-UYj^Ns+Zp5a@deKI)g znSBHmVz6x78#H9N?+WoFBGh*)K=+kBr$yxYl`r67CV4Eorp%d_+v)sQ!nD?4mAUpe zZ{9f13#*Z4S5(loSe-J-nN}RnZ&|1Xouc5;wipR4*f!1RxBH?zeJQQ?+0NGMHu z_4vp=ypro4ZE_#(AKS8lm^Vc)9*#oBJyh%;YNu=xc!v8{cOC>f)hFx47yvH3RZk{F z+V6qSs|7rpAX+a=m;^VGi_LdERp&}fTm)ca{)of_2odQi=4$P`FR3stLX{7mDSwVt zzL|U%G?y{?v7tyM>oW62M>;+ai1~A0YiBQfcBuRtqOgo+9r%o zDIPrF?5?n-w98xhM@nDBjIwc`;<-Q`@DW%&MB@$jBZakvK^KCS7COspmegx)nWk}{ z5;W!@EbS{zoVl~O${CBL#Ffe-v|NjYL;rb^WViZ5{*4w(w8Dux%`jp=Ou?&}0SE+E zxbEy8gSVL`G`0Jydio`ChzT+m!C}e9dsWUr^OAma{?tWb_1mqik4xmm#D2wNyluv@ z_S4sA=klw5H??J_TsG1%MlVZLK3O)Go}MmUy-NG+5n${VZ0z`JF@I_x?j|jIDuRyU z>aJVbdTYV$_fAVR*V2Wbc6(OB;@@U_@{KKq$u}iF5DJlMzh1_& z4FP^4SNQcWyj$8v{1JOUzQA?`&1PPeRNTP;9TwuiOC5N%w%nE%TefX_W!=$X*MImm zxz-Dp46MPI2ZhAMQV|-d-1zd`d3lr!;w>k_`PpQ{5Nv98gV>^CDp??e^&;X138Kg4 z5e%1MbJED}K%!;R5Td#QM{?vYX9xmjSQ@b_8U4{Ns14OI4D z<|Ek%BC+42`LJV1kGrUT+2bX+b$DNp*fY6Y{O;#$9FC<2dMDS_A`Q(L3oqQp(q6o# z2*^2Ip&!&LqP7CUg~$Gk2`6YF#Ps#d@|WZx1Mfi5`eD5$%;}g@>eh@3K;7QRYA5=- zBh85VV~x*cHghj>_S!J_HGSzbI6Vm}3i{%t7j;AIn!#1cH%#=sUe`P{ppw69Br)~r zEeegVIFO_&3V4T-*qLUdhN#147QI39e)Pc;o%=24!*q=T&@d?!2v?M5KLj#8*Me0A zpU33m=hC7H1m3Ih!PWd-OwIIaB-Vzh#_E#fH)xM17qVXlK8j|L3kvs0@$?ZYvymNT z$}3WP)SZkT;i10ecGJoDCY4Zvv=t;@Ne@Cm3;ZHnma5SdR9yzejzL+$x?P#8ifEI< z1zoL=MO?DenMv0X>4djAsHsWG+59$%h-eMw)a*f5UNO*F6w>5Z(cj&R)9tqNqGzg4kDiCg?|$l6xZ>>!}y`Gx&27zc-P z{-Tv4Lx7I~l6amLhe_X%XAn5iBVBr zvN9e{qezb%E&;2QX@X6ls@MUAOZ}tZP!o#Bo#ERtfhJr@>`SQdqV3S^IlFrf?F?=0 
znVM~36mXOl5pk{M@7E-5(jM(&+o+dkGcJFC3W^z+NXhS*^?<6n#{Z6LC`nWlJqIId zrT49a;7Je7@nKn(r!b(Xd@YJ_!=Ay1f+qyeu|g@)h7ekn<>~ojO3pMpUx@Yuw6-YL z1$dadJkswLwlt~cKO$$8hnkRbCp*4v{jSLx^epSj6CdFf&KW)ZNZa5d@vuWx)xk`s z4dSbPgNn1+Y7;yJ#i&_^ZMVfT)LKo`4gvZ*-ySg?UXYd;0BU=&^>Tc1z{?qbHQ5f+ ztXdG9v+?j@OXg;QJRrYQ-HaHE>&!68U5=2}HU;iM_)hW=L~c8bm^3fQ1dpc(*{r7d zepvi`T;z6|%3JDox_LI#PFi+OCjxt4a|(s^c!i(H{_Zb-x0nZ*_r3!aF5_Llxy+3ycA@jdr?8`(-hvBRbqKd=Kg7E|GqxXGi((wD3_YkS#Gx{2S>;NZ9ubxC}o23@?e zi!R**Ej==f= z+X}`7AGnoo6&wCMD7PB#m9v==E^OF!LtwoeyYT>%r^$P@S^kzu zC)dkyVhX-h#qUSZkg1dpk2M6Y6bF+4Aq^}~$N*}V$V_4jMkptsi~%W08Rm!F0+@9@ zBU=h#j)<)`7$N-p759)eBentbgtS-U6Z|NPB5Pki#XK#!&tv!NxvS>c~hQTilZQ zaofWimfaal&;b=W0Y(cj5$FX%(I$4lKGV@EIfqJYP%eBc^HRUgN~*;nu=#Ynk;0xy zVz1Q-9L&--8vaB%Db-nEVj|qMPK>BPn|T53cN-?2D@8^1c4@ad@KSpAwQ&{>Y!kGf zrEikqM4B;iQpW<$0mPfPh77$)XGmWW~9WxDoC?_PmnU09%ncJ;|tAzF%B9OZn-QR--WlEPN zk_v%0%@d|Z-mC8hqCMEp>!#qIY5~pKe?|N-m?>I-WM#!(vGAi3rjLa{beE2bDdv$8 zLYbvPHR9VgZn%SafXWDPE>&?YZWt=FW5}I1;^;kr3~)l*|6&*6&GHUt zu*jMHA!;w7Q>o}o+Hqkex-)lt-r%U#e^$-`{YIxyQe{u)src&Hu$8b0MQaf zFy@_N3wwJ|8isrq^6>*jq_PV#T_s+=!+=ejh=FDf$e!rgLC=zM^B1q6%L3q3n{N zh>HR_6Sxt$soF`CR3`>&1=@FAG7x*m(k2w9u&=Q&p9^Gzyarsp=-EZyz<5!-|P%9(FH zanPA#vnYC$@e1~pSMvfKi1$xNo%+1fJ3dqwk<}igQoT(RLb&6SZq7naSEmEpNR1C@ z+?+doI`DH84e0i}qhu&`MN9WNJ{Bq5(^{!ZOt5i0HrTy5_2}f-b?2@NPkz=~X?q|>wYp0!BZh)%x%Tgx#=*S+x zi!*y&Ug4Q>3>szmmAtIO>+Ph2%S>lm1axMCh$(`N8bnV|?%WIxss-!A_<#1}V4A5Y|#zoFZcJzcQV#U4CO zFmE7(8xP!iOCo-669w;nsiXoYi=@;TAN75n`|RJE^$bW{znLuHA#2O}NRI8%8Xd&Dbp)I&$_~Afn-};k=tB>Zi7`L{ zDE}dqtjMR&j%0-wg{;&+vKV!W4n~Cfu)(&}(J`~2UceN_q0(U>Nc|=!w}pxY`ol>y z-BGAG`YG|*uSW6Gc;+>WYxJc7m6qX$t}`T6<|#gReB^VHgeFHHJTToiO`>VA2fOSe3c4OXbh~ zrqpKPa@MWQuo@ujf3)j*BMR$W738$Zv*mcOA!=frU*@3_^}+!kKN`pl=%#5p>Pq2& z*+8<00}w+PxV%{|SMI@;h%L37YTn5RX2aL_!>a=nXuhL2RQ|Ag>NT}k;T8WrGvcut ze#Tq9A5MO}I||?anHka8DcM{^y}=*xWhMM#!1NX_N>Y6D$KIsR_5`=_3AoQFyhgo5 zf;P7raPSVHPt*uKju7-OFp8t3h6={A*5A2Inv>9-%e!m6J_t&nzrtM1M;^R%ps8mO zZ~$*KaOFx^<92vAV4%;Nd3^H9jtOt@lYh+Y2vVE<7PrLoCnNe$*MiDMZ$^7|SwK|| 
zmX&b6MWeMYhGm)S&gq^CpNuy~FgehIF8U=DxPiSvoggpDSM0MPAGl{nn+Yvx&B0J8 zjn!y%u5mxi?pA8Eax}Qirv)od>O^wZQGJR>tQ@nqJj7F}JkVdRP=6(eYYR*_u996Lh}SxH`uw zSxmO;7wwCM;WosyLo4_`D;m?fa=HhiY=Gtrz(_<@_s>6Q5FZ^}jv3j$OeZ>jT-CQI z5zAjLALsP(9t5BYHgXcr4lEqsj{(l0ZIfSq0vPUljum#_7cg)h<7|Q_6n@KmK%?~R z@p+zSlq*UL@W%%Z%Zbwk&?SD%$vcQ17G8s&4(0~Q8Vq&UWuhOE5~Zaa+y*NH&Sq$- zo;H#G-Yydk-lFx`p4b2%M@)#pZQrPK{O@Y$_=*nf_qh42M>Dt9{3+yfb2CLq+-#_t z&J1PBEx<3fhrGow478PnCZwI#t|&UZsyqe#8a}vh9k-Df;b&jJuX{zBzm>l1Q&0s1 z3e_KDd4n{~{g$3~y7avhccM-86RixYaOC}HA*~PVyo4E5`)t6)7;*A4viuF0mH?Wm z&_}8%;zR^RoH+PvThb9@&_L)uNX#xg@B;DCd|Gq5PW24m>QRrCqtt20)xo=i&#}ng zfeVmTv^{pJ_8dE2nR~lG)kA4 z{4u!M}BoXkb=17(peiOcVvK{M`Vc&2fBpU*u;_5t4)FMOGt$z!R)DWh9}Y6rmQfCueiCwqEJ^`gmarO8D}GD$ zn`8w`&Jh4!@)yf?b6=1qKa==^ea<66Z8Kjo{E{j#AHK9APiW{RVLlabcq|W*cY&D= zv{c2~j`|Ow4Gf_(i_c=yN<*8?Br-DOKj({NWB8u_V*a#od7OG zlEO;`YjGX;M&U-=N)6;Mm^kP_@g%o2@+VC;of%_MDSirvx%{`5sSS+H=sA7yg6@&?@Ql`MHj$WaSnjb}2^G>CNN7`PZdV&jDmC1!feUkdam4 zZbjoZdh;(d{in13A2^EUo?(V*{pE@V!=WGrTtVr;Zv~WYbq)B+jwJ;hb{wS1VT9Tb z^k6jt-0l0d7{K3%V6LGopnw1qm3-|ZLU4sZwm<=m4$PNn&N z@&|aW5S+w>Y)0Q~dOE$31e>mc;J|LObYO6r&^`5R(UYaW5qtW+4KD~gf4JL(`XmIz z4>`gE7j^grDMd4G=iHn;aam4y33v{mVEO`{E6a>$_B86Z<(psshm-zApC4$j|M;am zPDkJVoTC^1_cS(Ods14-AaHiRS*~4_C{_t|EvtKx{}Rf^*oHdcYi?vKqA}SiX$e26 zG5VqngZx{f2~vzqi~#*RDxyb-s9<%>gXkbtA_jSUY{+*-#xK*TK-xdN1rc;b#yxUCwf>k*R|AYM$~zWnj4u;HCg-=cRm zJ#QX^)8Js2IA_K?$(saJ$+G-Mm8IF!#=6x>DT>ZiijEZ{nTt4#6kqNGjVCU)iNHPq z@s7N9krr2YEatpW*V73$fF}{NF((BGM zX-$3*(?GW;E2omB{{wBI>YJdw$%uqmIks87mO*X*1R2Q4I1)P4C-Ct&=1JnkdOI2 zgKHi9rGKg_h$yNcM;Tc;GN)^6nRYmr^GOC6U|zLu<6#Rvd#xJ}r~iZ!;l z2V}rKw;8bdhQ1jp#%29`D^=_aOW6is`LZo7n}L$Txy_le)Q{WgYk1r75*_FYMbsLi zet%jdnGsn8Z^{4zM7V$fVz;puXLLLJ+s7Issk;iI(yz%|KGpab#1st${oZUFj13r_ zZyfOt=(RqE011n}B%c1BAtG~qB?Boi@IWB&6?i?tX(T$xLc+m3)-p`XUR|KcvFw&riA|+mJG}!*lJ$seMdfk2Of}dlQ&c@PhAy zdkxywTIYTj_Ph%!hDhL(((lN`fddt|(7$(;PQKYj$zb9q?&+R25ewMt21t`0mOq?5kt-^vw4wuY9Feh-xi%to_ zKf!US({LWCz$;MAajSFspu}IRX~kG>@vHGIFQz5|MaHNMeYys>3to{qC@lkq*DA5; 
zk<5wshL}9nOCUoRo`@;fm6mCv9s>~@T5VlrWsb$j*3Ki7iTBThmQbK#=m#plf?kC6 zaPd@g{o*gGeCKZIYt9GbWYDNSee`V<5b7vSY?fAY1t?<=Vp`LcAvO>F# zr$G&HzfA@#5fHf8zmnJgBv$=GbszNw-9HUCJksc`0MCij>|TD~CkM%gB@W0d1gskV z#%b5go%8m|)hs6x=GeQcui8Mz4jgiU8r(rr++YD0{2sr<&V^)H|zMIrRtWZGf zl0;Mg^Ryhka1k2=XUJp>z<5yA>}9ft3@tKvz2Ud%@5f7d`UkbA#VWZ!4~Q<1?zh#+ z{BErmYM7-eZSN2RIu`MF8!MKQ;}6{U4{s+hL4yn#boh1mZ^!$1oZGAfa4Jx06L`bs zfC3jSFPe~pYJRC`%A|w<<8;31G^J&c2JOr3 zf3cBtUi`6ex^= z?fJ2S<_s4seEtBc{i=QaPn4vN^;1E8(6>O;*gCBo0}MgB0x9Y{tAQpqsH$ED1hILw zOXOF_$7=ft`$Vsyp>u{TU$d~|uE)8!tS+e`^IH`IlCG}&I}PAfyJK zsg^5GZ?mgiAYZY}euRa7S8E3@>kt}gy`N%?eU;-tvD>zrS`u}0+fJd|HV)R<(2O*D z!>Luw0KA_+jis5B;OY^az_~iKY95cwQ>5fn3Q7)==|la$<-q^=pMO-s|NS%sd)W;uJ4)FfcKnrOQJg zq1kfm9+S&wM-@+(GQkGg!{0Md1WVAKl}Q!N zN}Wv(wcBJs2&=+a+wb;zNt!%PNWcxz^UQ{ufaUCdXIM|mhol=)XlK^K%;^mZWTI&) zqV{-fIsRSOm)iNOkiBi|neaGW)6jp^pg+Na21*`BFEEU?wk5tx8?lTXj5{8mg>PS9 z8RBY~U~@IVEM2zcMnYl;tQ~>Rkn`tX zA&}pYn!2#30={EQWQlL(pOrbkZx{HHF=#}a_G~@gvlE04su@8xR=^Ke^(c2ffwJc?kieg@%yu zzE<{mi{|j0sVQo2sss1YMRmM2Dg4V~{%@e8QNMzt#qEY!S6{z3m>3f@`w$Ea3FUu| zQ$|*$?Fcn|!j#U_l8xP-u}?b;I1PBj39%=}BUWi@$Wi6YPw*$;k1&$|92EF-L%Hk>Z5)^ZoI^<;x&AYK{@b1Opa1iJ=3Al^=jIn! z|7&=Z2I7OBCXU?n$Gf-QV@n9#pC$i2C;$2(ReI%)5q`5OA{w`#1*RJgO_GgHJ0vsf zKPMgh%i{ctP5b?C@FR@szg^USy?Q8KBf`QE}YBOHEm=x!O#zIkCt9sg_5w;8W z?x|N_5!i#`zD%Rr;!h#6Q#NfkAQA@9WY`P_$p^j5dk~BKVo}?D`}JGAYCQFZx9M{Z z+Fmmlwd?oEXl414(ugA#x^%5Yr+`K6T$+FtHeTF}0)=JkpQffbZzRw`o!b2$B|<6T z_K~@p3zw@yUFM0aGgnBY$Sf}952*V^WsmLYsVbF-7cRd*#Nt=NjlB}5IpX0u#bsw4 zi(3`?2kLLm9~t#0hczkX?CrSi>Pc_Qv9qzwSEMQn>p;>e=pwgB)5D3onye)bZK=)# zj3D9yH{UcrUv84tb)20XOnJoW*6(;$lDiKqRmxwg<+97swp#U!#Z@mA7FGzxicFjc zd&)bc!A#*z>WxZ)?Xik(=YUrkF#vThgI2M>nrsmQh_iH~BJxasK^f9AWxe|Xs0B6v z5kT@PbS0onzjkV`t(a=}n-#zUE0DEN9+i)m1mSMnTkMTqy?F1iw!SXfq22Rkg!<`DK;1yp^;3!dIX7@s*OPPc*=?J$PrYCQkIF9(X6w}! 
zLe!!c3oH8D7TUMrBWo&@9{#H9F$t7X3p5hIgED$1JA?PzSEjgh*gxcdC_2-~1RbI#*=33dq{(4K?cTh+bWa-1&Zbqj%m zp9_=|v6F>-3=?zn4ks(!(Ca?y#YhUPXdU(Cm6e4X74vx$Z?e(QALK4IMPc*Ih} zj+p)?d3-lI61%bC3o?rDpHdYV&@Jhq=(K zmS26b@>3n(Nw4PXA_1FOmnhr0Yp12IIg8d_Mq~X2*_yLLy3q*2XM3<$k1r*2p^^++ zq(kn>$T#7{Era1A`wzBmE|iv3BRno!M;YgB`1Xcg1bB?$ZSXy`=#7#soho>^SZ!tAKO36X6Cb9yc_<|H14&u-zqkx#$`u2&73HxAY)g0_&g%jRWx)SD0z8^wZI^$M*#l5_mVe1P`p0Zhbr{l|UUO3~u^_T-UJG|=0Ei-GoyRZZ6 z8J;YMFKvg&*caTLmUHLib#-+%yTJl`0V3Dr@LdtKoJHRcxGG+AOO`^jrDFW1n%sD2 zuV8bwnjMN@s$x0Kd^LDk!m0?OJnK)C-*pSs1&L>H=1Ro*fS0>A=YS_!cssH8VRxNm z+djM<>JvG?7fvP+EU>E)J|}egVCSnFe&uAB@FsB=Eac@$NaT&@<6&6f$9z}_tM;{F zVWB&+z)gAI($UNC%k-T?WoOLD!&@Dyt84zj-$70}xX<$Gvi!e;8C`ynA8r$Gal*>5 z;gDO~srZ?bQygqP??}g?a^7A3Xs_a6ZE-aa#JYM$$gXH+K*_+1v@EQitegn(H$Szm z4%9>fM$JU+4n<0HOzD~&uQ4FruM%ICs`q)a3jBvk+hoDd!O^+!bJa5h?m=&!yAXUm z@mf!HeH7xo+00J?ZxLoaWY>ge{Q#`sE50&#QeIxZWCv}3U?yN^cc{nVv2t3j=4nfy zsw#{OArjyr6LHysO?*A%jxhYOA*fyyJ24xmk6HyH-zuCW&BL*Gce&Z6r%2sq$?Fb8 zIpIyN*SZsMb&0f38g_6F+z>p)vt!p6!-ULVY+8LAD;ikZi^SSblN0VdWEBi6(K$Sx znETF#Va(J(LH)b3bPy6QmUy|{Gj6)Vd~~+NObk-i!0jYr!(l(d)%LNPURk`tVMyQE zn?Bl5vP}jMhOCmk`&8d=l2U~+$8|2buZ~tlT5qj)o7xlWF8I8?D4Fh2GAYE)X^=@x;0o8sG1jLTG{Rj$y;dG((U9w-^{;*6zIK`a2oMaM(myS2T{B;&(xd& zrS`tBr>U0$OQ~+}b_&KRYwgbkJ7tO9lk)mN-lkn3Vzbg+eDgO+(f?JZ|NY&k*P1B^ z$m1QI&(bzW^Ps#}F5f81Y|9vy#hIGE9Zbis4QFRDz8S_#P5n}4Jr|C?-PP*tppSXi-rV`_a{Ab?=?NOCIjP0ZY6J>l ziG_=>M|ZUwLp#F~5x+k@_7k>^oY{SG)D>c%tpOs(-Ca;Jf-ICyjy%tuMcL}$>oP10 zg>!!E7t@^TuS8Ne(u~(5jlL648DM_Zg4`!J*&LR~DT7tYyLPlRH2PiT_mJycQA%_g zzcpRg%U9-Ji53cat7xaRM(?QDAqr!0JPeN^C5Pc)_&}Nb zJKW^k4Q>Tj*)O^{z-IKKS{6*R4IV{jTWMZ984@U15bux>(n6*pHm4J@t%*7c@qkeO zurLhB`Q~#5mD~?veyDdKQZWo+mslTEhMR965v8Uch)ykbz(01RBr`K31Dk{dbs)m0 zu2XD_FXds1LcOXG{Eg5&b^Xl%C0@;}kwwp-T8IuoMsa!XCe?G4ZRcp~SMNqys8NPf zpk^sqpT=*zPrNS2#PaarLp@W|pc@Y)=<112JclU(>I3@R39$1~`QE|BeaIdNkTZ(O zf-}vO$>aT%ZS#&2^k729;&PeU?Plu?lc5YYPR^cTyC0*gzx-T@5g=%2Xz1izxFI4< z!7t9a{JN9au^E>-f@7cUUDp$_TRe2Mn29LMq+8gJ3V10cb@wv~;m__FX9#Cteo 
z*9ag~+74(oOD(!cq7Rsg)D>Cle-L^*KOLguk=R71j=&I)#d}uUk6jQF_P%($;O#59 z=<^lLt*K^GZ?kjk5Y{q5URs0Ij6ypYIiKT}+_%mSBNI79Ir5&;#t~R(YQx)IUzWYeGqd@XF5gD@y z?OF3>Q%N7T3j)Ls6<@ObzHFJn=qGg3gyBZB*Db4}{_SP&+L!n;^ld^rN}aLH)VI=B z1qpO7A>a6(@>BD$NG?}k4Jp`^#vy4<)XOJ7b;FJGVWAQfME7vTXo**dVlzK^kd>A7 z*{+5sc=zdqG!AJ&qN+?U65oUX;zw2iLOi^pgVTvxqK*=~P`?S=)Os6|GIn^w|cd%t{XB!0qohibX-oMaGdl5WD4Wp*vCPunCef#t2 zx-=6@Z{OpI8ZSa}5tl$iMuK$HsFipjbK#4l&EV+h2Qq2w?Ba<<8kNuIgb!$DR{E2I zDl3)alce>GjU%uqGK9BR#UJ$c(R}3RW0;M+n!Lw>Ma2@^g`{_v)M%ootp6qP$2Y@Pp7`fAiO`yD##nCo4C+ z?|EIJN=q|_l5ykM+S=CI>|0dlu|oFWWyo1%jVK#u7U&unebPKM>6@iFeCCGZXLTd=(Q> zt_$f_mWBmAhOoNYacw$JfcEuR16P6k?I=;8c8P$K_0mzoQaqa{31nXTOrQ`PAN#EH zAQJC_2q$v4Y}lH8rBC}ql_%K02qEAa=V0R=WZUq0!jmKRZyUW6wd#fHRYcGia$+?s zLXn>m^PS_-)@GZ(W1c=1Grap<=NheygZ!a8VV|c(5Ths|4l(}wrbeVY5RmL3zZ%>= zA>GafgvqklTg=_7Ke2f6lVdLZ_4Y;Z`;KHq-8DSI_43W@z$O>{?qfe5gXI-a1}aTY z!L}h{p-07mdvF2|u{~;;y%`NnD&IVF9cS>B)`>Ugr z5?X3{sPXNa`}dEjPi6{M?z)q|<$K7;D;Q2qav!FS9&-BmO~m!#R^OUhY*2Xk2TLn~ zn1QWBzDFxi{YIx<>%zIUfcg1)WDJiGhs|sGDk<&`McZxz1E`*PL1Ju5N+XpX6l$Pv z9QR^*xBBZg$K9iGTF2z6YHQ!y^v1y8;5M+CDvtd&u9@rj(9`RvoN)b~Y7FPg6AW_U zx`;bSrx~h!uKt341GEH$@VoArNztDw;Fp_Azcty2UtY--eOa6_#&wOg{hT+gdZN;h zSrU^X!k|Oy5i>Kst?hoPTHTw<8YgcVTJg8a5@X59sptuW=ZE;uz#J~^w$AJHSXY{D zx?jeTnwS`TxGD!7Pu)iLq6nAO9BUorW(_8IQtYIWEw3BcVQpr{z{Fg{=CN{D(DjHF z9_Zi8+H=)vBKt4d{QiO-lKip`_w}yeF-oOs{5G5AX4w9#&T~{k@eb6OQsamfByNL! 
zn%LD&i7l;RhxKW)_jD0?_8T2Upvv}Q+ol%=SR=`#@?&x&pl815wtn^}IraJ6R9yQi7#RE$5##!@$Grtma9mHoKC!}M|xP(anm3-uOE*Dp?{z`bk{n# zYpW{xdea`Ad=g2mM7y(>xVd$BSZeiB82ZIphs$i@L0(=SpW`BH*!usGX%mkpL>(-*7P>v;8XDWZUr}IkFy!TD86cW*2=<%3@Rj) z6eF(Jw0FP6c=}PrNGV6*B7a!#WVdk@-5J;qheSI1vj!R?qcH06j-+Vsqmj$=W~ zQ@&WgNdHD|M*dfJ{P6_wl`!;Q5U&m*J~j48EurIbB>#1!CzT1aFn={oYbF*&cMN6 z_;_GEqvv>Iu!-??ig3DvYK^*k(DGMy909^-I`f?`*_*vSR;_ev|Rf`DrSG&~@bHQP`YB}km7bS0G zw_Hb$ihV>P&ZyTmXe1<>V3Ws;3-j}VAoZ@N#Fo`iulrsQ8Fokep&O|;+j4`NZM%8m z055*aJu1UbJ(Skqc$eGkM_ZpJug%efJ|H(2&Vs0&_pI6Sm5XC3i?%%FGUSNak8k|c ziCHx&`FJd}!U(|0P{(qzr5=}(k7;NYX6o4*N4hbOpYKe6jZCB!2T8zWHlOkW>*&1< zZF{4ell^OvKY{4jOff{qD%046ogvrA>evV1{ZPkhXVsGn?uz#LoLnjtQ^=?NBsQKFqVEW&4 zr|$jB)t_-gr)L=%88#O(kNh#oF^uVqoyT*-E{;i-T13w;6Qtc@I(pZeTk9ps)HSDC za@=&a>fFh@x?Tae*d9NGr?4Y%E*=yT*H6vOeXr5=B%rbJPM!V6dqI~i{N1)=ky^r8 zZnJcq6n~p&e*LxiA%Mq;OXeCb%vMW+25qyye9<5D)+ge!;Y&`@R>mpj=qsmcv4OA9SDE*?gv%oNkmAu`ysQ@>$DKWd#F@SJJTjVujrgzouAAuxwUxl z*4gjBf6=8#uUJduda~x@gK*i@9|ZOpaY@&ayi#QqKUBrQzqOn!#{`{K=jZfB2yz@R z6ik7Lv@)@o(Jl?-`=5ZGrx+IL__;hg^jjQc&Pk&Qst01BDSGDg2JGpLRLQ zRu`jFc(S2?cYp}j0U2&;x-TE&!tqvritBu9^TFv&qLda96)P?-uJT6kN)ZVa3k&)6 z{fqgdpU;MF3fQshby#(UVG$yg66f#m7gl1ie2~JQ*&iV_cpfrL%|;;~vy06&`K1&w z*Ey`CfS^z=SqZy;oHXBh=iD5#TpFIE-EXc$Kq|sG-sJXIW_W&RRJ5?Z;D>~DgYdCp z=ap|a^2f#tLIcAF#g2R;UPq31%iZ^!PQ{40-yy!HzaAFKnTZ0EM8EAUFIT04rLeGY zC~F8SS3aHNb!xxP>0<*!iL~19JEznazx?vX3B`Q7Wrb_5;<@ziC#NJM1nSyBt;Ovr z9mBhA(}UpCgwmdQ^fDPB7C!(vTgJvPt*p-3-VVN$@Ct0}`55=tWHC6)MPP8~2qPUY z1sExoMHA9e`CnVVK~^6CEP8r}x@4EQzKA^!e#CXO9Vbj42EHh}RqtC++7S&5GRj;w zU6Tcm^Yqsye4kjw7?b$&MirA zgZI~mguSgJx2g2Q2gM4cjUJcnt)jv6m#1vTI|G3sA(xA*Op`t>X@d*j(awP~R zKh}^YR;r(acsMJ1f>C_DHijlk!t3Mb_w;qDFhcWaPt(~sYL|tM2?KREf8}nl z!SfL4PD4WQKxGUslv1_R?czxE+BHlc8ifY0pNUmHIK;##&$wCaU-c+&prxNNE9bJo z0nsNK!Tn!d)Sx>JP|At8^Ihlo-XopY_F)*>8|cvAjiO=V26}qkggUZzNn|_h4us%_ zU2R!l<8MrjoDx!Cu2ZO9hj}O9NKAbq=L>&T6&<{b_HMrk-i5OS0KdS46U>#ik#^~( zy51u5GlyBGj(RURiisrLaJ0l~2^_lfpg9?LqnQLPebZ21|GmC==VzUn`)s^%kA96x 
zU!7bs33^_!Y0kl?M|WI-FHU=BTSo`}lP6EkHrFUS%sI-Hz{I$*zEpZ)zcEPm_1&_V zy$sh_vF3Zc_b(8B6}sW3b0EHLzVVYMt|=>P&BeJDKQ@~MgWYi+vRZ0!F4n>N{oSI_ zE7@KCwGs12$tfxO2g}k-I{a9t6yNFMR-!M&{+bi9IELBkx>*IPqwvVdoO?#mVLaI# zrj|Dxf7l}y`cMe@Rw!yUY2tS;iFfRVl5Y3o0pW&=8S1sH!^Sb$=WC7p)2G{8TkP-7 z)Rpsl_u9t+VY*F1{@HZTb@q_^P3^ z9aTL3FLVj?cKT?2H~>NB?SG0}9#3_lzn>Xe9d;-W9cuRfeFD-yl51e#cX_@)+aUh? z$#>|l{~X-$FF`Hip-JEGCpdwDKn(iy1sfnZ$nCQStg2Mfr-qi6nGO>s=3S>oL@k?5m8^)8SWN6z$iyqSklO_BG3JmV4CtKUH?wlylriU%$-m zD>J!4^~tH%pLy*Z&GhAX_hLmZCwyjs-T`-}Utz=7=Q1MCUA56wmPuzVV5Lvv4x}?Y z)fY6LMhwgz%7ya8=2z99&0mXvP|41FuTCDgf2^uBSM1l z8Q;s2I=24&sS;2L4j3^pGj_&$ZjV`bvJg*sKlerWjjAcgx=h`S^X z5+fJWg4^pONSTsXSdAV?dOrg9Au^2j%Y#|QJ;UY*I^KTjMcu=a0EHQO56icr0u7>f zQg{w$A1y2_I4tI7a)eGOCNLo1S7Rwa#7cU5eO`1i1c-w(l&Z?3%t^GD@A%%z-eQUr#F zcV@o0We~P=bQG16VgNTe_iT4HAxe0YXe33zi{yBdNB-(kYLI5)=ZdIli8cDCro6Lc z$ElHaH10-ouSgFM79~@PK(3*x-*KVo8KY9Hg#lRQ)@ZO352QplVE+41dtj)0THeu7 zdR~hDIYILYz{zcqnF0F%Q2dUFnE3erSKV1gMfJY@egHv{5@~T5K^g?CxGYCu{*8ioczknYZN&+mWE^ZcI^ug*Fz&b(kP7dSI(?|WbO_5FT7 z*WUg%BTN}4fsEjQ8gIO{8!HY6i|)tGAR>0a$jGqVp^Gur-icIMEVWhKPIX(^J@uUYoI?r`b)xBN)&K) zF<f{YStwspJRucVzXP{<~=`mpu{0Y)wVlSgY1PkgtDFl_+OuvrAhb}$1$g4Wr4doIkzrF7dfO%W0|!a+zlj@0_XdmuSX ze#2Zbx?#NaAQb>mDx}+L|LRoc+1Fh9w%#Iq{j1SiXzOfi8a6lQ{@TLkLqsyF^=9GK zN0iyD5RzYED@b}f}L7AGAevQ3Qv0*i(zi;q+ za3y9~B&$^p^E*vMP*pN6frDILS*g%09;lw9>>d=<*baPj)*5PYH}=IvQ}wTkE}ouW zTDPWA6}Cs*L7{eO7KRy;QXORmSeIB_TYqt#oj}c_#+M&3c{; ztiOQ3KrGc%p?3t-s7FFVEHdXt_0>P(p8IflT01p*Lop%NL&XH^qXkhFOQD&HzfD(% znkbM@3}^<@Bsx}}Z0w$-8bJP3rtVz1QoLl2x_-F9vGlhlUeJ zv)=KeFylen@QC{iqJtT)9zPU%;E&zR*#;aVV&_wT(YS*Kv&qlZtE1I?;M%=)VpOIo zQ{^~Dc(zZ`a(yL&J@8DEst)m3(dAMujI;wt`EuaVNz2Tew$-OPyScS4${{8ioS4R) z4rPwk`GzXsI$xN#-}%ABTV}j??vaxhy7;L^s(!!kMhwgz*g*j!T{a!*a-&1Cs`W$$ z?wjeU#nxb+iSo4(FO2=MxApFO9ic=Tf^MG8ro#~l?d|6D42-n2v{FNBLt61R_5IR+ zncPxG^bDpKP?LDhC)@M74TzlA_!|Z*gr_@F;VTlfqL)x|&g3Yt>18SY&hk86HSS9k zfof=IKzLCUrKMi(g|LybA+Q9n2PXUaRP5@MRaK?4vPM{T4uP0jTq&_z9V{(cs1~6t 
zCj$1J$&a00()4B5-q5$dO>llvyXypSWEKtgxg@HD0EL+;eb#dac@wozfnWi1g-mKN z>f$KS8`O?%oNfpN3}(W0&=(-~e#6qA0yEvlKW}|+;I4;TJa_y>OOt9p50sJ@R^pE- zSgSYVf{o;(fYU2r1mk#~9}?HtPy2&Uu*pXU zHEIZ`-q5Dw=g_t=2FXQdiXL9MTZgnE(pSn(B;!fh(FZI^ACc?=BD&EzprG|PiqIQu ze?K9}mgL=3@H-=6g?oa*iwc@5iaC*#JhpU=%CVgX6a(c>HykO=&hpXy>(y*kHCR(q z_Y?|9v`Z#}j2ddQjHU@scYkAzml^|aJ04lI&xDEZe3j7V7PS1P z#c^|{8v9pr^|0>vsEzH$@I?(@Axhd@i#`&1K@v*1gG$I%hv}t)T`x@>{&6>og!9eh zANf2&BeJU%;5D?O@;H0qo`mM9!OT$G4q-J6Vlc~J33-|@<5X`Ev-J}Qus7sv^;yBU z4l$U!6eKMt`k`+G zCr)ETqt}K0l2>O}m)YUO4`#hqlA~Be#M0(w;dj_sN~1kY|{P)676(K`$z1no;DR>t6g zRlm0|uNGql9E|&*UdZU`=8s?H^0a%tSnPtmPCWOl5O;5=@f>p7;)}q_GtM3ytWA8N zhUx$HkGa>J-z70Q1nr;AhyHMOAG>B1$HRNpcjkaYGt(UU151Hf{H=@2nD^EZTs(`t zA%ZEfy@&fn0qJbRnd#(yFy+$;iu)h1AMg)46cjCXgqaN>Q&(rpsKGfJL>f`3B=FuP zVT_UVoKhJu=75m5!iFwL& zTB`c@sqd;aFkA2+&XD*VJIai7N)%o;A(}ex2sXmt7jL^RPIkB~M`cTFn!Jzh2ZkJW zf|3AizsxMmp(8_#*Si_f<<-9eO&*_{{f0GXDlNe@@;t<1uh6D$AP#<4)f&m}`7bt7 z7DIgWKWm`LT+Z(~!Lz)ddK~7xBpN*_`=I`i7qr}CMP6vVoxp2>uj~^mh2jw7&4p1@ zgxjG4wc-K`HL}V3)5NerjH{9^OpS6flxu!B>vDZbrIIE{a(qk}<}f>pXFB|aOJ77_ zt+>SDQOwW5JU;=$W@36uk|ze2LF;37rt60{CdVmjr+cAs0XA$T%C%1EQi86>q~6Ed z0n>i-ooWuk)*s*L^KE2($)jLYq4{7Vp(a^fILG{|IA|3admz2~07R{=-NZ{3aFq7p z`ZU=<+D+(CHY3E}+WxDE>oL8pR$0#J+raY(Vz;9#U)-nf;1q1yn1Qz=+CzTXaXEkD z1X}@aL7>)#kWCM4d1+^4{x&eKbl88QDs>cOWgEN7iLcN_-QHnqFw=;lczz+Y-^gHT zDEs#A(OQ`p$*;aD4%8WZWW{Cf`6HKsa%;myrYQ1TJJuNb6iv$6ibGy$8qSOqfv+|IT`qD`C9U^g(x3E86IV zzc#MB=SOzMM$PyAo$-ryjNj+vkhRVQXX?yzoc_HwQb`siDMGO;>>y?zMeAl*xSoYQ znOc-Xoogfx3gN{T@mQ(-?l`a#gs;$pr@)pfaz4jNcovkr(h@^u+`PQFh{N`btuM9d zo=DAh*C1{7%edtcJ||vWzy$pHLk|{;%HCv~0+G5(sPN)5FweOrjS>&Y^)P6Ph z?}6)JRQ^>_5J>mvjxU2G@jIYh5#1(BArK>Li8fMz3O=iVZm?c8`bHwL>VL29H9xg! zbpr;_SVCM4cLt(V8h%TziEoDj?JIEfdku}`mGvO@SKQl*sSe6Lu2|6}y9%m4-^@R? 
zC^;|wUft)hwy~GDx5<^fLPWK)zr(9gnpqzix@hL%vs^bS zIg!DpqOvULJv$kD`(jYl#POj+FsuHw_SI+-6C0b@wd*(}95-{TW4}&$c|AeQN*FkI z{>Kg?nE-Ei(3`}VG*y3y+!_Q+!FtchdbSX{K3bZ`^*r-b$I8-@P|}s!i8lH*NStU- zuf-16Py_&aS@c`VD{Ravcr`132bk4nFbV0LC<%uiPO7QXJ+nOK>(aUYgncndbXse2 z{_z9)IzwD$V$bZ?o3y+(MxSgk_<5@|9r0%G#_)g!lUx7P`orh&VmA={B_t->h;@N2 zl$X#Hr0m%sKS3)tv>pW}+@Oe4mBxRnv)q7z&!_CT3zhT^$WHLXEO)Ic0=~-am?cV> z=QMerjcuJbg4l*yZTO3m=~;&KLSpnFqGO-*9!#zNV!&wTy9?Rd?b#m@$vV#wjg*5; zjwo`@sm_PNo$q*f(r!U+6K%=7d2|1gXj@KMIc(825&tO#rP^IIQF3zKzgt%s&NefP zXEGKR0fe<{h|bfsB!MLJygmBBQg+QORVl62KiGCYY^gPBm!E=?MYfmrCgTI98R+RB zJt06>vhHrhzGR7Kf6ct9?guPHK|d@k_>@;wwKu#q@jT(cA4n4mPBsur{Sd#LA3PEO9Op7BUrRw3K7;y&P9GWVNi>RhwskMvhpMhL-*xkKa^ zs%fR#0^0SN?_LDkprIr_qrt_+&FV^L)8Wu6c!7m%5~k(mz8_y^2;A)0-z IFlXE z{yLxa$U$1_2@n%CUYuaL%2=-&q#h`tTmjdNU){LTTKm>YR$l%QH#a5;Q$%Y6AaOvp znFclx&1Axaqh9mNlTBPmhEfd7d2d)~rrw zIKq17RN)KDfl&w!Ge190b|*gYoEEaPB{J;we(n5p^Y93Ea5Mmt$?no8rc>he-|dpH z1N{aB139PR2eP6#<{G`_TGGr+C*lX^ogzX+{3^hGqi z8_$|v*GCnMR7YhL%xae|EzdWHD`|E@Afn3Nw5+Vkc3tYsI0Ul_p4W>Ka>560^{T!+~-XY=l z9JV4jj>hU-^T>)SS7Y*|xrixXcpy3f&(>=Lt^bgdBvEYD<#@3qpm2Z;>@Zd!Vw=LK z;BTKX#BAp4jsEiGeaW;ymPK!kGpG7Jr#WPgmfy2^WKhkP`%f#d^!DZ}nND{mNq8L( z#Q+V+((?@o8UXG?hm(sP>6p%601aHjpWg76fOIo$%FD`H&p362y^Kyt2?G5M1_bOI z?Z1*ygH=DH!mfV-;Z3@1J{$6q74BQ1vt?svHJaE8%BJ6SU3Px9akEzhNaG$et^jQ+ z9*H*0rPgl|5vdI!8-*RN@Sj&?`l9P*rnvTKC=v7W8G++mBOyHH{|FJK9xxjA-u*Xk zqkHg_T`3Y>L0QB&zim{~S8!=Mn8D=2NhzQL#1BGAgy%u1BHJ5A3Ga$Iq!Kx1)>GbK z-L2m|aqWz8gBtV1?wLx~){C8(?d>VLPc3p*E-67sD@u@696%>iIP3!j>%H$$`UeY;k$m`|#5(_6Nmcvo zEx<9jd{-%D{n(kc2E!&)?;119=;$7kr9XqFsMo5(c{P2?QnN>umah4T!>|fQiEpR{URqpqcdHRGjgB+}oHc}y zihzPb2OS0))RqBwj9fLcpIN&~5c3l2Wlg*?ljY!e4L+>_qAZ*%aX`=T3X=f1=Q(vdS`)MNB5~8 zHX>ET$WB~DNVurIqezDnomzSAPhfmPne;60Y$faLvcA+Wa~%oZcPAH?$lS2Ev0>o}Zjcts+ovCF?-}inv zH=MBt;vXaPp!2+ajaN?|Rj-;_3cvMytL6=qsQb3Ed{ojMK&iF1wLPjRmmCdMw5B5s z4B39%-kz^of0v-xU=Q4QmGq;Mtdy1y7D#hbht={xTX#i!Xn9z4z}gzH~ed zntBHyI_^zlY6q7LCzz~(&r+N~x-~pDCCd+naWC=%*0s-2g^W($nT5EinVHM}s0%P< 
zcDbNbrl#V|$m|)y_tmt(vJFGEU`W0&rDK2cglBesxVC?!rio)qTDmoFNmWH9C?|OP z1h&q5)zdfKswaCe0uVd736U`!-jEP*^VoyDtB|G(w=3@H zE()`|Cy=#a^Fjw5+d};(4gXLHus|EY@cBL+VIIc5Ja@Y#R!ie1URPiuN2lw*leddXS(Z_zV7 z;_MyI&!uKv4*InUy(wd}3Tvgg|x+yF}19m`g`!2dWTs%<~kZ()BK58^-XXHe+Ua6dN#BFn2uX|h_dum%9gdV6|Ord^-h zk_kN_Q<9P}fRn4Zbe3f8FUP#I(ua?@8RJE=+L|aFB@TmX$xa(aw-t;trCC%a2~AnVc9-=9-$4sRoIlWjH3_wrEbJoZjbnloaaU>Hkjen$ zQSfVh%)cp#Z-BlF<4E-dydTSp5w~>R7a-~HHs_Xml_$X)(~SA&V7aDRCS_Pe7KF9j zjhcsc!`djS;;4*qN4v%8N=`J1_!6;yJdDbN*;kM9Npo4BiU%b3d^~6@J#*ek?<;AE zR?V0A`pQWuXCE~uhj@4h7N#J2?E!jw5t#a*=cb(?%vVbx&3Zz}J#u?!bk5uY&M*@x z$Gq4x+jK9EQ!gB}^rySD%H?Q@7G$2=oa2<5=Wqe~c)uYJDb0C)zw1j|f$j8FZ;5iN z9go9(==;tHgu0i4tMKy}#{(b~d*jm?U)R@L?<}b$@8bc2s&xO#6%dWUz%b2Y@7CSH zaI@$}Hckj_Obuv{*J-{D3JzWj3ejP;eH^W*jx=PLX>`9DX-Dw_z>Q#})6RBeY#RPg z*sIxUrQ{oTYgg}ch3K>U)I4MS$I&9SsjJ^V+{--p70VWH{U6sbuzF;M=sJcf6^t*r zt!zV@H@+F00h%qZ(NC0e&U=R(WEh&wW2FKjU ztFzv}%rCy?FYvScWa&PtFe%WfC01tT>%+y}3&TwM^vNF_j%?#}DPXGyYOTNba*fY; z1wCWac!l)EH4qW$I0nNKv>Y6S;$Fte_fC!-(Cf474#1Z9Ojpt#)%-{&>YetJ zY18#TdgV1W?_}xD5ix1R3;+uPfl@Ov%L_p6U~Z1_7&(T=eDlCerBzc zUy3kXdM|?>U^iD61afnYj-Wt5C3qb|mYA+FAsu!JqcZTTgc1mEOyGc9pNPn!=-5dbggU69hoctGkG%xDmFkm*hNo zH#i`heeK|YmA(ykS=;xDeamDOS66py8~bkI%zI!6kvh#a5}#_zAMmeqUNFx#K-bDl z?myN-Q58mI%sJY9{*qG6Obw1j2AbM#?&#|JF|;@GHrRA<5Ps|SZ5HnfLVuv^&>SFO zLGKdc_^QG{CuFd;l?aTj5F%(eI2G{ABK)1!`QOTeaw&I2fo98XlcwWRgh??Ia>pQZ zj~UrE*fJ9+B|Prdzdgc+=<4gz2?;4JqlBl>Qbjho@3{7`N@~hsnZvsRM^!<%mWSW(xPJKtyt9x{?>V2B8m#-?jT4wRG|EvlO%oY~At{|AFAc z{{B*C%@q+q8j|G=;Dq_<-ClR7&;3_qik20iDFE7{a`Tv7$-0kpQ2aQv_q$SO<4B49 z+6!%74VuUIsnuQ#2vqngo*ZvcgV_~4xo}&AT+9R_c%Srie@UqYMa?-Ls#`bW-bdV> zwD%PZSJADmTa=VRM^^r~R3q)R9Q40iF9)Za&xR~^xk1o)A^7&16p6!OOuk$*J2;IKhU;BQ7rDc#hPrI=noL?)WCZ{XevY z+69r>T{pKNeMw^2U`X1GZA>fVL&#Ogv`+G=4P!_!uS!z2{6W<+~Hn+AaET-jW z8oi+YzH6chiL?xSWR-rYAvxhXODn(Zz?=m;`IEFzMAPj|T`r=_i>H*JpBvvWDOZ2^ zP|SnU!^7j${n+=rV8g*Iv35XD+bjp}5Cv(5%nobRnENqMrHTe*= zZ|KlYRJiiYA;)$y%NeD)Ty_V=!7R(`JvxaAT?3PmxTDp%lK902>AlP+v!kP0Xr~XI 
zMRXnt-66p101YO@{Np=tO-t{YGE|H^vF-o)8R$x(y43zN+Z$~8eCS~LVqU@TezIeK~s;f0Bpr>RC5m<1x2^YRk%-k4^geI zpSi9dy0D*{ah#t(!5~@K!tvI|sn-g92iG>pqyVbf^2-2KZr?!%)-FLq4NMxeSsX@4 z$Xe}G{I#0W^Rar~>*#C@=knSvCs%S>o$VPxFzUx|ub+MD1H3>EBk`!%UVeZy67V}2 zMK$lt+!qz#T6a&Z2b@|}8faVid3ZG4%+X!K-DNhD#T!ItJ{J~tue|oXFbbn}1qA)g;X);*J@M)bF+dWz zfN9DIo4Xy&^`lx_M_$UdthuT;Fz<`wP_+{znz%=#UR3tL6<;q9H=UoF{#%g}&U78u z{(FZD9B^;WT1Rtks$3Q7sGlTOrc3ouMHQ76*5({f_Ip0(>i_8&hjgNQo^~sxDT_QS zC26x<|KkcGqt54}YnnCfJzj59@lv1>v($K%x-^NoGD zLSSkEJ#3%ds?8@c(Ktl-N|~g-K6q%$%|F4W2=w{45zo(w*)hqT-K<()-%HIKTi&%) zy&9&cVNv4*BOYg!!&HEJ03l(yMq67H)PM))5{z4x+YKBKOj&7f zfXOTU8td$Ry%%q|y$?yt{E=0~Jxd^1F}-fgS)HjqvdTHAvs6jOQ&Urm9oPo^0goSe zfqaTrdY&X5?gzsw5a32vH$fy!Lw?d8g(@Nl-3t9nW# zT$oM#z@QGdSLYI}c^*}*pKBj~Cs|a_A+hdJz(Th~_*ulKkw*|5g6*86G2;-+uqCDX zLlX#A8N1oK$L3s!iHy8oG+}?HKiqn&mJDowcZi63Kg021d{5h~xVWbLo;$K%g1}&b z!khWBPK7VZy?QsQuKZigi!*kXy#wP7Q}&O)&jE7!z@o=D9Od*%iq%~fT-=NPH))*S z__^(rrFz=?`>cWSoW|y-zJM3YUo#TvbpnNqI)MDw)B-R;AP~SF@qh#_plyv8AnND7G4`;n%ESF(r zvALaH=;hhnNA$AQshl7{00tD7y{v@j(9)60`8V)@^EsnXjBY_Dw|z4 z_eE^1WCSu>e{Q-`%mTJX%v-=9q-GYB% zwA^y_s&QO5m|fEUy#2rZFem;0g{l4Lnd<+B2*3Zog^8)z(#LQ=5K0Mt`yVf=+Nx14 zut$bDQjnd|5WZkir1y=+0KbOA3kq!BhxwL`)L2HUI(}mp|1b9x`sdTLTME!mIruNH zFJ}MK2}OU@GG)qty|WhjzX;(l_5S-U8qt5K5&7fi|8ldh|0i#G31hzh{cho;#1sU) Nl;l)pOQl~2{5PVGW#j+= literal 0 HcmV?d00001 From 4cb56482ae0f866c1bc688d14bf45057e329cda5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 11:55:48 +0100 Subject: [PATCH 165/247] fix: [#1579] clippy errors --- .../src/console/clients/checker/checks/udp.rs | 8 +++---- .../src/console/clients/udp/checker.rs | 2 +- .../src/v1/handlers/announce.rs | 10 ++++----- .../src/v1/handlers/scrape.rs | 10 ++++----- .../tests/server/v1/contract.rs | 2 +- .../src/v2_0_0/health_check_api.rs | 2 +- .../configuration/src/v2_0_0/http_tracker.rs | 2 +- packages/configuration/src/v2_0_0/mod.rs | 5 +---- packages/configuration/src/v2_0_0/network.rs | 2 +- .../configuration/src/v2_0_0/tracker_api.rs | 2 +- 
.../configuration/src/v2_0_0/udp_tracker.rs | 2 +- .../http-tracker-core/benches/helpers/sync.rs | 2 +- packages/http-tracker-core/src/event.rs | 6 ++--- .../src/services/announce.rs | 16 +++++++------- .../http-tracker-core/src/services/scrape.rs | 22 +++++++++---------- .../src/statistics/event/handler.rs | 8 +++---- packages/primitives/src/peer.rs | 6 ++--- packages/primitives/src/service_binding.rs | 2 +- .../src/swarm/coordinator.rs | 8 +++---- packages/test-helpers/src/configuration.rs | 10 ++++----- .../benches/helpers/utils.rs | 2 +- .../src/entry/peer_list.rs | 2 +- .../tests/entry/mod.rs | 2 +- .../udp-tracker-core/benches/helpers/utils.rs | 2 +- packages/udp-tracker-core/src/services/mod.rs | 4 ++-- .../src/handlers/announce.rs | 6 ++--- .../udp-tracker-server/src/handlers/mod.rs | 4 ++-- .../tests/server/contract.rs | 2 +- 28 files changed, 73 insertions(+), 78 deletions(-) diff --git a/console/tracker-client/src/console/clients/checker/checks/udp.rs b/console/tracker-client/src/console/clients/checker/checks/udp.rs index b4edb2e2c..20394d55a 100644 --- a/console/tracker-client/src/console/clients/checker/checks/udp.rs +++ b/console/tracker-client/src/console/clients/checker/checks/udp.rs @@ -117,8 +117,8 @@ mod tests { let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); assert!( - socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) - || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) + || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 8080) ); } @@ -127,8 +127,8 @@ mod tests { let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); assert!( - socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) - || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + socket_addr == 
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) + || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 8080) ); } } diff --git a/console/tracker-client/src/console/clients/udp/checker.rs b/console/tracker-client/src/console/clients/udp/checker.rs index bf6b49782..ded5c107e 100644 --- a/console/tracker-client/src/console/clients/udp/checker.rs +++ b/console/tracker-client/src/console/clients/udp/checker.rs @@ -116,7 +116,7 @@ impl Client { bytes_uploaded: NumberOfBytes(0i64.into()), bytes_left: NumberOfBytes(0i64.into()), event: AnnounceEvent::Started.into(), - ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), + ip_address: Ipv4Addr::UNSPECIFIED.into(), key: PeerKey::new(0i32), peers_wanted: NumberOfPeers(1i32.into()), port: Port::new(port), diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 16ff83f81..e21a485cf 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -234,7 +234,7 @@ mod tests { async fn it_should_fail_when_the_authentication_key_is_missing() { let http_core_tracker_services = initialize_private_tracker(); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let maybe_key = None; @@ -265,7 +265,7 @@ mod tests { let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let maybe_key = Some(unregistered_key); @@ -308,7 +308,7 @@ mod tests 
{ let announce_request = sample_announce_request(); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let response = handle_announce( @@ -356,7 +356,7 @@ mod tests { connection_info_socket_address: None, }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let response = handle_announce( @@ -401,7 +401,7 @@ mod tests { connection_info_socket_address: None, }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let response = handle_announce( diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 8decfe95c..b48d6e036 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -192,7 +192,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let (core_tracker_services, core_http_tracker_services) = initialize_private_tracker(); @@ -224,7 +224,7 @@ mod tests { #[tokio::test] async fn 
it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let (core_tracker_services, core_http_tracker_services) = initialize_private_tracker(); @@ -272,7 +272,7 @@ mod tests { let scrape_request = sample_scrape_request(); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = ScrapeService::new( @@ -314,7 +314,7 @@ mod tests { connection_info_socket_address: None, }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = ScrapeService::new( @@ -361,7 +361,7 @@ mod tests { connection_info_socket_address: None, }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = ScrapeService::new( diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index d9ac2e1e1..dd80e6b59 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -748,7 +748,7 @@ mod for_all_config_modes { Client::new(*env.bind_address()) .announce( 
&QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .with_peer_addr(&IpAddr::V6(Ipv6Addr::LOCALHOST)) .query(), ) .await; diff --git a/packages/configuration/src/v2_0_0/health_check_api.rs b/packages/configuration/src/v2_0_0/health_check_api.rs index 61178fa80..368f26c42 100644 --- a/packages/configuration/src/v2_0_0/health_check_api.rs +++ b/packages/configuration/src/v2_0_0/health_check_api.rs @@ -25,6 +25,6 @@ impl Default for HealthCheckApi { impl HealthCheckApi { fn default_bind_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1313) + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1313) } } diff --git a/packages/configuration/src/v2_0_0/http_tracker.rs b/packages/configuration/src/v2_0_0/http_tracker.rs index b3b21bda8..ae00257d8 100644 --- a/packages/configuration/src/v2_0_0/http_tracker.rs +++ b/packages/configuration/src/v2_0_0/http_tracker.rs @@ -37,7 +37,7 @@ impl Default for HttpTracker { impl HttpTracker { fn default_bind_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 7070) + SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 7070) } fn default_tsl_config() -> Option { diff --git a/packages/configuration/src/v2_0_0/mod.rs b/packages/configuration/src/v2_0_0/mod.rs index fd742d8d2..8391ba0e1 100644 --- a/packages/configuration/src/v2_0_0/mod.rs +++ b/packages/configuration/src/v2_0_0/mod.rs @@ -492,10 +492,7 @@ mod tests { fn configuration_should_contain_the_external_ip() { let configuration = Configuration::default(); - assert_eq!( - configuration.core.net.external_ip, - Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) - ); + assert_eq!(configuration.core.net.external_ip, Some(IpAddr::V4(Ipv4Addr::UNSPECIFIED))); } #[test] diff --git a/packages/configuration/src/v2_0_0/network.rs b/packages/configuration/src/v2_0_0/network.rs index 8e53d419c..7a4668727 100644 --- a/packages/configuration/src/v2_0_0/network.rs +++ 
b/packages/configuration/src/v2_0_0/network.rs @@ -32,7 +32,7 @@ impl Default for Network { impl Network { #[allow(clippy::unnecessary_wraps)] fn default_external_ip() -> Option { - Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) + Some(IpAddr::V4(Ipv4Addr::UNSPECIFIED)) } fn default_on_reverse_proxy() -> bool { diff --git a/packages/configuration/src/v2_0_0/tracker_api.rs b/packages/configuration/src/v2_0_0/tracker_api.rs index 2da21758b..9433c8c8c 100644 --- a/packages/configuration/src/v2_0_0/tracker_api.rs +++ b/packages/configuration/src/v2_0_0/tracker_api.rs @@ -43,7 +43,7 @@ impl Default for HttpApi { impl HttpApi { fn default_bind_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1212) + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1212) } #[allow(clippy::unnecessary_wraps)] diff --git a/packages/configuration/src/v2_0_0/udp_tracker.rs b/packages/configuration/src/v2_0_0/udp_tracker.rs index 9918bc1fa..133018e86 100644 --- a/packages/configuration/src/v2_0_0/udp_tracker.rs +++ b/packages/configuration/src/v2_0_0/udp_tracker.rs @@ -33,7 +33,7 @@ impl Default for UdpTracker { impl UdpTracker { fn default_bind_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 6969) + SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 6969) } fn default_cookie_lifetime() -> Duration { diff --git a/packages/http-tracker-core/benches/helpers/sync.rs b/packages/http-tracker-core/benches/helpers/sync.rs index e0f022108..dbf0dac83 100644 --- a/packages/http-tracker-core/benches/helpers/sync.rs +++ b/packages/http-tracker-core/benches/helpers/sync.rs @@ -22,7 +22,7 @@ pub async fn return_announce_data_once(samples: u64) -> Duration { core_http_tracker_services.http_stats_event_sender.clone(), ); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = 
ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let start = Instant::now(); diff --git a/packages/http-tracker-core/src/event.rs b/packages/http-tracker-core/src/event.rs index 5af88c927..2a4734bfd 100644 --- a/packages/http-tracker-core/src/event.rs +++ b/packages/http-tracker-core/src/event.rs @@ -174,13 +174,13 @@ pub mod test { use crate::event::{ConnectionContext, Event}; - let remote_client_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let remote_client_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); let info_hash = sample_info_hash(); let event1 = Event::TcpAnnounce { connection: ConnectionContext::new( RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), info_hash, announcement: Peer::default(), @@ -192,7 +192,7 @@ pub mod test { ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2))), Some(8080), ), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), info_hash, announcement: Peer::default(), diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 23d589bce..8d12da713 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -349,7 +349,7 @@ mod tests { let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, 
server_socket_addr).unwrap(); let announce_service = AnnounceService::new( @@ -380,7 +380,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4() { - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let peer = sample_peer_using_ipv4(); let remote_client_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); @@ -442,7 +442,7 @@ mod tests { } fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { - let loopback_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let loopback_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); let mut peer = sample_peer(); peer.peer_addr = SocketAddr::new(loopback_ip, 8080); peer @@ -453,10 +453,10 @@ mod tests { { // Tracker changes the peer IP to the tracker external IP when the peer is using the loopback IP. - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let peer = peer_with_the_ipv4_loopback_ip(); - let remote_client_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let remote_client_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); let server_service_binding_clone = server_service_binding.clone(); let peer_copy = peer; @@ -466,7 +466,7 @@ mod tests { .expect_send() .with(predicate::function(move |event| { let mut announced_peer = peer_copy; - announced_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + announced_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080); let mut peer_announcement = peer; peer_announcement.peer_addr = SocketAddr::new( @@ -514,7 +514,7 @@ mod tests { #[tokio::test] async fn 
it_should_send_the_tcp_6_announce_event_when_the_peer_uses_ipv6_even_if_the_tracker_changes_the_peer_ip_to_ipv4() { - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let peer = sample_peer_using_ipv6(); let remote_client_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); @@ -550,7 +550,7 @@ mod tests { core_http_tracker_services.http_stats_event_sender.clone(), ); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let _announce_data = announce_service diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 1445ffcfe..4587bc90a 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -304,7 +304,7 @@ mod tests { connection_info_socket_address: Some(SocketAddr::new(original_peer_ip, 8080)), }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( @@ -345,8 +345,7 @@ mod tests { ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1))), Some(8080), ), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)) - .unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), })) .times(1) @@ -366,7 
+365,7 @@ mod tests { connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( @@ -384,7 +383,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let config = configuration::ephemeral(); @@ -420,7 +419,7 @@ mod tests { connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( @@ -495,7 +494,7 @@ mod tests { connection_info_socket_address: Some(SocketAddr::new(original_peer_ip, 8080)), }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( @@ -530,8 +529,7 @@ mod tests { ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1))), Some(8080), ), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)) - .unwrap(), + ServiceBinding::new(Protocol::HTTP, 
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), })) .times(1) @@ -549,7 +547,7 @@ mod tests { connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( @@ -567,7 +565,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let config = configuration::ephemeral(); @@ -603,7 +601,7 @@ mod tests { connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index dcb814eef..78ef24e02 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -101,7 +101,7 @@ mod tests { Event::TcpAnnounce { connection: ConnectionContext::new( RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, 
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), info_hash: sample_info_hash(), announcement: peer, @@ -127,7 +127,7 @@ mod tests { ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2))), Some(8080), ), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), }, &stats_repository, @@ -150,7 +150,7 @@ mod tests { Event::TcpAnnounce { connection: ConnectionContext::new( RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), info_hash: sample_info_hash(), announcement: peer, @@ -178,7 +178,7 @@ mod tests { ))), Some(8080), ), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), }, &stats_repository, diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index c271ee5d6..ef47f28f8 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -194,7 +194,7 @@ impl Ord for Peer { impl PartialOrd for Peer { fn partial_cmp(&self, other: &Self) -> Option { - Some(self.peer_id.cmp(&other.peer_id)) + Some(self.cmp(other)) } } @@ -517,7 +517,7 @@ pub mod fixture { pub fn seeder() -> Self { let peer = Peer { peer_id: PeerId(*b"-qB00000000000000001"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), downloaded: NumberOfBytes::new(0), @@ -621,7 +621,7 
@@ pub mod fixture { fn default() -> Self { Self { peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), downloaded: NumberOfBytes::new(0), diff --git a/packages/primitives/src/service_binding.rs b/packages/primitives/src/service_binding.rs index 74ff58e66..c1ec308c8 100644 --- a/packages/primitives/src/service_binding.rs +++ b/packages/primitives/src/service_binding.rs @@ -115,7 +115,7 @@ pub enum Error { /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; /// use torrust_tracker_primitives::service_binding::{ServiceBinding, Protocol}; /// -/// let service_binding = ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(); +/// let service_binding = ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(); /// /// assert_eq!(service_binding.url().to_string(), "http://127.0.0.1:7070/".to_string()); /// ``` diff --git a/packages/swarm-coordination-registry/src/swarm/coordinator.rs b/packages/swarm-coordination-registry/src/swarm/coordinator.rs index 1ddf3e60b..f4e94c62c 100644 --- a/packages/swarm-coordination-registry/src/swarm/coordinator.rs +++ b/packages/swarm-coordination-registry/src/swarm/coordinator.rs @@ -438,7 +438,7 @@ mod tests { let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) .build(); swarm.upsert_peer(peer1.into()).await; @@ -605,7 +605,7 @@ mod tests { let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer1 = PeerBuilder::default() - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + 
.with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) .build(); swarm.upsert_peer(peer1.into()).await; @@ -626,13 +626,13 @@ mod tests { let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) .build(); swarm.upsert_peer(peer1.into()).await; let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) .build(); swarm.upsert_peer(peer2.into()).await; diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 986981b1f..ffe3af3b2 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -40,7 +40,7 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for API let api_port = 0u16; let mut http_api = HttpApi { - bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), api_port), + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), api_port), ..Default::default() }; http_api.add_token("admin", "MyAccessToken"); @@ -48,12 +48,12 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for Health Check API let health_check_api_port = 0u16; - config.health_check_api.bind_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), health_check_api_port); + config.health_check_api.bind_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), health_check_api_port); // Ephemeral socket address for UDP tracker let udp_port = 0u16; config.udp_trackers = Some(vec![UdpTracker { - bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), udp_port), + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), udp_port), 
cookie_lifetime: Duration::from_secs(120), tracker_usage_statistics: true, }]); @@ -61,7 +61,7 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for HTTP tracker let http_port = 0u16; config.http_trackers = Some(vec![HttpTracker { - bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), http_port), + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), http_port), tsl_config: None, tracker_usage_statistics: true, }]); @@ -156,7 +156,7 @@ pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { pub fn ephemeral_ipv6() -> Configuration { let mut cfg = ephemeral(); - let ipv6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), 0); + let ipv6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0); if let Some(ref mut http_api) = cfg.http_api { http_api.bind_address.clone_from(&ipv6); diff --git a/packages/torrent-repository-benchmarking/benches/helpers/utils.rs b/packages/torrent-repository-benchmarking/benches/helpers/utils.rs index 51b09ec0f..16ba0bf7f 100644 --- a/packages/torrent-repository-benchmarking/benches/helpers/utils.rs +++ b/packages/torrent-repository-benchmarking/benches/helpers/utils.rs @@ -9,7 +9,7 @@ use zerocopy::I64; pub const DEFAULT_PEER: Peer = Peer { peer_id: PeerId([0; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080), updated: DurationSinceUnixEpoch::from_secs(0), uploaded: NumberOfBytes(I64::ZERO), downloaded: NumberOfBytes(I64::ZERO), diff --git a/packages/torrent-repository-benchmarking/src/entry/peer_list.rs b/packages/torrent-repository-benchmarking/src/entry/peer_list.rs index 33270cf27..54a560994 100644 --- a/packages/torrent-repository-benchmarking/src/entry/peer_list.rs +++ b/packages/torrent-repository-benchmarking/src/entry/peer_list.rs @@ -195,7 +195,7 @@ mod tests { let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - 
.with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) .build(); peer_list.upsert(peer1.into()); diff --git a/packages/torrent-repository-benchmarking/tests/entry/mod.rs b/packages/torrent-repository-benchmarking/tests/entry/mod.rs index b46c05415..5cbb3b19c 100644 --- a/packages/torrent-repository-benchmarking/tests/entry/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/entry/mod.rs @@ -368,7 +368,7 @@ async fn it_should_get_peers_excluding_the_client_socket( let peers = torrent.get_peers(None).await; let mut peer = **peers.first().expect("there should be a peer"); - let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); + let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8081); // for this test, we should not already use this socket. assert_ne!(peer.peer_addr, socket); diff --git a/packages/udp-tracker-core/benches/helpers/utils.rs b/packages/udp-tracker-core/benches/helpers/utils.rs index f04805001..1423d4bcd 100644 --- a/packages/udp-tracker-core/benches/helpers/utils.rs +++ b/packages/udp-tracker-core/benches/helpers/utils.rs @@ -10,7 +10,7 @@ pub(crate) fn sample_ipv4_remote_addr() -> SocketAddr { } pub(crate) fn sample_ipv4_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) } pub(crate) fn sample_issue_time() -> f64 { diff --git a/packages/udp-tracker-core/src/services/mod.rs b/packages/udp-tracker-core/src/services/mod.rs index 64e357b1c..56882e68f 100644 --- a/packages/udp-tracker-core/src/services/mod.rs +++ b/packages/udp-tracker-core/src/services/mod.rs @@ -32,11 +32,11 @@ pub(crate) mod tests { } pub(crate) fn sample_ipv4_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) } fn sample_ipv6_socket_address() 
-> SocketAddr { - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 8080) } pub(crate) fn sample_issue_time() -> f64 { diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 901a1434a..ea19611ce 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -491,7 +491,7 @@ pub(crate) mod tests { let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - let client_ip = Ipv4Addr::new(127, 0, 0, 1); + let client_ip = Ipv4Addr::LOCALHOST; let client_port = 8080; let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -869,8 +869,8 @@ pub(crate) mod tests { async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); - let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + let loopback_ipv4 = Ipv4Addr::LOCALHOST; + let loopback_ipv6 = Ipv6Addr::LOCALHOST; let client_ip_v4 = loopback_ipv4; let client_ip_v6 = loopback_ipv6; diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 43c5bc4d5..add576a89 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -340,11 +340,11 @@ pub(crate) mod tests { } pub(crate) fn sample_ipv4_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) } fn sample_ipv6_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + 
SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 8080) } pub(crate) fn sample_issue_time() -> f64 { diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index 04ad0f39d..0d9540289 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -167,7 +167,7 @@ mod receiving_an_announce_request { bytes_uploaded: NumberOfBytes(0i64.into()), bytes_left: NumberOfBytes(0i64.into()), event: AnnounceEvent::Started.into(), - ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), + ip_address: Ipv4Addr::UNSPECIFIED.into(), key: PeerKey::new(0i32), peers_wanted: NumberOfPeers(1i32.into()), port: Port(port.into()), From 42850f3031ea4cf0d4c99f780ffcba402da369c3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 16:10:36 +0100 Subject: [PATCH 166/247] refactor: [#1581] extract methods --- .../src/statistics/metrics.rs | 62 +++++++++++++++++++ .../src/statistics/services.rs | 47 ++------------ 2 files changed, 66 insertions(+), 43 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs index 650194d43..5e6d70831 100644 --- a/packages/http-tracker-core/src/statistics/metrics.rs +++ b/packages/http-tracker-core/src/statistics/metrics.rs @@ -1,9 +1,13 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::aggregate::Sum; use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; +use crate::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; + /// Metrics collected by the tracker. 
#[derive(Debug, Clone, PartialEq, Default, Serialize)] pub struct Metrics { @@ -49,3 +53,61 @@ impl Metrics { self.metric_collection.set_gauge(metric_name, labels, value, now) } } + +impl Metrics { + /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn tcp4_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn tcp4_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn tcp6_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. 
+ #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn tcp6_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64 + } +} diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 66bacbb06..77c04fef2 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -1,6 +1,5 @@ use std::sync::Arc; -use bittorrent_http_tracker_core::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; @@ -156,51 +155,13 @@ async fn get_protocol_metrics_from_labeled_metrics( // TCPv4 - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let tcp4_announces_handled = http_stats - .metric_collection - .sum( - &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), - &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let tcp4_scrapes_handled = http_stats - .metric_collection - .sum( - &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), - &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), - ) - .unwrap_or_default() - .value() as u64; + let tcp4_announces_handled = http_stats.tcp4_announces_handled(); + let tcp4_scrapes_handled = http_stats.tcp4_scrapes_handled(); // TCPv6 - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let 
tcp6_announces_handled = http_stats - .metric_collection - .sum( - &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), - &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let tcp6_scrapes_handled = http_stats - .metric_collection - .sum( - &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), - &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), - ) - .unwrap_or_default() - .value() as u64; + let tcp6_announces_handled = http_stats.tcp6_announces_handled(); + let tcp6_scrapes_handled = http_stats.tcp6_scrapes_handled(); // UDP From 44c184816bb57559fa6b396e734197a459f87ec5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 16:19:41 +0100 Subject: [PATCH 167/247] refactor: [#1581] remove non-labeled metrics in http-tracker-core pkg We only used labeled metrics internally, although the API exposes global aggregate metrics (without labels). They are calculated from the labeled metrics. 
--- .../tests/server/v1/contract.rs | 10 +++--- .../src/statistics/event/handler.rs | 35 +++---------------- .../src/statistics/metrics.rs | 12 ------- .../src/statistics/repository.rs | 24 ------------- .../src/statistics/services.rs | 7 ---- .../src/statistics/services.rs | 12 +++---- 6 files changed, 15 insertions(+), 85 deletions(-) diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index dd80e6b59..85792f922 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -704,7 +704,7 @@ mod for_all_config_modes { let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp4_announces_handled, 1); + assert_eq!(stats.tcp4_announces_handled(), 1); drop(stats); @@ -730,7 +730,7 @@ mod for_all_config_modes { let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp6_announces_handled, 1); + assert_eq!(stats.tcp6_announces_handled(), 1); drop(stats); @@ -755,7 +755,7 @@ mod for_all_config_modes { let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp6_announces_handled, 0); + assert_eq!(stats.tcp6_announces_handled(), 0); drop(stats); @@ -1149,7 +1149,7 @@ mod for_all_config_modes { let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp4_scrapes_handled, 1); + assert_eq!(stats.tcp4_scrapes_handled(), 1); drop(stats); @@ -1181,7 +1181,7 @@ mod for_all_config_modes { let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp6_scrapes_handled, 1); + assert_eq!(stats.tcp6_scrapes_handled(), 1); drop(stats); diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs 
b/packages/http-tracker-core/src/statistics/event/handler.rs index 78ef24e02..a1d8d5fc2 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -1,4 +1,3 @@ -use std::net::IpAddr; use std::sync::Arc; use torrust_tracker_metrics::label::{LabelSet, LabelValue}; @@ -12,19 +11,6 @@ use crate::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { match event { Event::TcpAnnounce { connection, .. } => { - // Global fixed metrics - - match connection.client_ip_addr() { - IpAddr::V4(_) => { - stats_repository.increase_tcp4_announces().await; - } - IpAddr::V6(_) => { - stats_repository.increase_tcp6_announces().await; - } - } - - // Extendable metrics - let mut label_set = LabelSet::from(connection); label_set.upsert(label_name!("request_kind"), LabelValue::new("announce")); @@ -42,19 +28,6 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: }; } Event::TcpScrape { connection } => { - // Global fixed metrics - - match connection.client_ip_addr() { - IpAddr::V4(_) => { - stats_repository.increase_tcp4_scrapes().await; - } - IpAddr::V6(_) => { - stats_repository.increase_tcp6_scrapes().await; - } - } - - // Extendable metrics - let mut label_set = LabelSet::from(connection); label_set.upsert(label_name!("request_kind"), LabelValue::new("scrape")); @@ -113,7 +86,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.tcp4_announces_handled, 1); + assert_eq!(stats.tcp4_announces_handled(), 1); } #[tokio::test] @@ -137,7 +110,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.tcp4_scrapes_handled, 1); + assert_eq!(stats.tcp4_scrapes_handled(), 1); } #[tokio::test] @@ -162,7 +135,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.tcp6_announces_handled, 1); + 
assert_eq!(stats.tcp6_announces_handled(), 1); } #[tokio::test] @@ -188,6 +161,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.tcp6_scrapes_handled, 1); + assert_eq!(stats.tcp6_scrapes_handled(), 1); } } diff --git a/packages/http-tracker-core/src/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs index 5e6d70831..05acea937 100644 --- a/packages/http-tracker-core/src/statistics/metrics.rs +++ b/packages/http-tracker-core/src/statistics/metrics.rs @@ -11,18 +11,6 @@ use crate::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; /// Metrics collected by the tracker. #[derive(Debug, Clone, PartialEq, Default, Serialize)] pub struct Metrics { - /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. - pub tcp4_announces_handled: u64, - - /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. - pub tcp4_scrapes_handled: u64, - - /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. - pub tcp6_announces_handled: u64, - - /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. - pub tcp6_scrapes_handled: u64, - /// A collection of metrics. 
pub metric_collection: MetricCollection, } diff --git a/packages/http-tracker-core/src/statistics/repository.rs b/packages/http-tracker-core/src/statistics/repository.rs index d5e718821..ea027f5c6 100644 --- a/packages/http-tracker-core/src/statistics/repository.rs +++ b/packages/http-tracker-core/src/statistics/repository.rs @@ -33,30 +33,6 @@ impl Repository { self.stats.read().await } - pub async fn increase_tcp4_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp4_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp4_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp4_scrapes_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp6_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp6_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp6_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp6_scrapes_handled += 1; - drop(stats_lock); - } - /// # Errors /// /// This function will return an error if the metric collection fails to diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index dbc096030..b53d6f12e 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -53,13 +53,6 @@ pub async fn get_metrics( TrackerMetrics { torrents_metrics, protocol_metrics: Metrics { - // TCPv4 - tcp4_announces_handled: stats.tcp4_announces_handled, - tcp4_scrapes_handled: stats.tcp4_scrapes_handled, - // TCPv6 - tcp6_announces_handled: stats.tcp6_announces_handled, - tcp6_scrapes_handled: stats.tcp6_scrapes_handled, - // Samples metric_collection: stats.metric_collection.clone(), }, } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 77c04fef2..60c4a8ebd 100644 --- 
a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -101,13 +101,13 @@ async fn get_protocol_metrics( ProtocolMetrics { // TCPv4 - tcp4_connections_handled: http_stats.tcp4_announces_handled + http_stats.tcp4_scrapes_handled, - tcp4_announces_handled: http_stats.tcp4_announces_handled, - tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled, + tcp4_connections_handled: http_stats.tcp4_announces_handled() + http_stats.tcp4_scrapes_handled(), + tcp4_announces_handled: http_stats.tcp4_announces_handled(), + tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled(), // TCPv6 - tcp6_connections_handled: http_stats.tcp6_announces_handled + http_stats.tcp6_scrapes_handled, - tcp6_announces_handled: http_stats.tcp6_announces_handled, - tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled, + tcp6_connections_handled: http_stats.tcp6_announces_handled() + http_stats.tcp6_scrapes_handled(), + tcp6_announces_handled: http_stats.tcp6_announces_handled(), + tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled(), // UDP udp_requests_aborted: udp_server_stats.udp_requests_aborted, udp_requests_banned: udp_server_stats.udp_requests_banned, From a5c5a890a5af81ce1e01a978759ee42432d8490e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 16:24:13 +0100 Subject: [PATCH 168/247] refactor: [#1581] remove unused code --- .../http-tracker-core/src/statistics/mod.rs | 1 - .../src/statistics/services.rs | 110 ------------------ 2 files changed, 111 deletions(-) delete mode 100644 packages/http-tracker-core/src/statistics/services.rs diff --git a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index b8ca865fa..3ae355471 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -1,7 +1,6 @@ pub mod event; pub mod metrics; pub mod repository; -pub mod services; use metrics::Metrics; use 
torrust_tracker_metrics::metric::description::MetricDescription; diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs deleted file mode 100644 index b53d6f12e..000000000 --- a/packages/http-tracker-core/src/statistics/services.rs +++ /dev/null @@ -1,110 +0,0 @@ -//! Statistics services. -//! -//! It includes: -//! -//! - A [`factory`](crate::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. -//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::statistics::metrics::Metrics). -//! -//! Tracker metrics are collected using a Publisher-Subscribe pattern. -//! -//! The factory function builds two structs: -//! -//! - An statistics event [`Sender`](torrust_tracker_events::sender::Sender) -//! - An statistics [`Repository`] -//! -//! ```text -//! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); -//! ``` -//! -//! The statistics repository is responsible for storing the metrics in memory. -//! The statistics event sender allows sending events related to metrics. -//! There is an event listener that is receiving all the events and processing them with an event handler. -//! Then, the event handler updates the metrics depending on the received event. -use std::sync::Arc; - -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; - -use crate::statistics::metrics::Metrics; -use crate::statistics::repository::Repository; - -/// All the metrics collected by the tracker. -#[derive(Debug, PartialEq)] -pub struct TrackerMetrics { - /// Domain level metrics. - /// - /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateActiveSwarmMetadata, - - /// Application level metrics. Usage statistics/metrics. 
- /// - /// Metrics about how the tracker is been used (number of number of http scrape requests, etcetera) - pub protocol_metrics: Metrics, -} - -/// It returns all the [`TrackerMetrics`] -pub async fn get_metrics( - in_memory_torrent_repository: Arc, - stats_repository: Arc, -) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; - let stats = stats_repository.get_stats().await; - - TrackerMetrics { - torrents_metrics, - protocol_metrics: Metrics { - metric_collection: stats.metric_collection.clone(), - }, - } -} - -#[cfg(test)] -mod tests { - use std::sync::Arc; - - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::{self}; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; - use torrust_tracker_test_helpers::configuration; - - use crate::event::bus::EventBus; - use crate::event::sender::Broadcaster; - use crate::statistics::describe_metrics; - use crate::statistics::event::listener::run_event_listener; - use crate::statistics::repository::Repository; - use crate::statistics::services::{get_metrics, TrackerMetrics}; - - pub fn tracker_configuration() -> Configuration { - configuration::ephemeral() - } - - #[tokio::test] - async fn the_statistics_service_should_return_the_tracker_metrics() { - let config = tracker_configuration(); - - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - // HTTP core stats - let http_core_broadcaster = Broadcaster::default(); - let http_stats_repository = Arc::new(Repository::new()); - let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics.into(), - http_core_broadcaster.clone(), - )); - - if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); - } - - let tracker_metrics = 
get_metrics(in_memory_torrent_repository.clone(), http_stats_repository).await; - - assert_eq!( - tracker_metrics, - TrackerMetrics { - torrents_metrics: AggregateActiveSwarmMetadata::default(), - protocol_metrics: describe_metrics(), - } - ); - } -} From 0284bef1eaf87dfa0884baca89f869672574d8f6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 16:40:44 +0100 Subject: [PATCH 169/247] refactor: [#1581] remove non-labeled metrics in udp-tracker-core pkg --- .../src/statistics/event/handler.rs | 51 +------- .../src/statistics/metrics.rs | 116 ++++++++++++++---- .../src/statistics/repository.rs | 36 ------ .../src/statistics/services.rs | 9 -- 4 files changed, 96 insertions(+), 116 deletions(-) diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index 039b6b0d5..e5d2b87a7 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -12,19 +12,6 @@ use crate::statistics::UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; pub async fn handle_event(event: Event, stats_repository: &Repository, now: DurationSinceUnixEpoch) { match event { Event::UdpConnect { connection: context } => { - // Global fixed metrics - - match context.client_socket_addr.ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_connections().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_connections().await; - } - } - - // Extendable metrics - let mut label_set = LabelSet::from(context); label_set.upsert(label_name!("request_kind"), LabelValue::new("connect")); @@ -37,19 +24,6 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura }; } Event::UdpAnnounce { connection: context, .. 
} => { - // Global fixed metrics - - match context.client_socket_addr.ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_announces().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_announces().await; - } - } - - // Extendable metrics - let mut label_set = LabelSet::from(context); label_set.upsert(label_name!("request_kind"), LabelValue::new("announce")); @@ -62,19 +36,6 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura }; } Event::UdpScrape { connection: context } => { - // Global fixed metrics - - match context.client_socket_addr.ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_scrapes().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_scrapes().await; - } - } - - // Extendable metrics - let mut label_set = LabelSet::from(context); label_set.upsert(label_name!("request_kind"), LabelValue::new("scrape")); @@ -127,7 +88,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_connections_handled, 1); + assert_eq!(stats.udp4_connections_handled(), 1); } #[tokio::test] @@ -154,7 +115,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_announces_handled, 1); + assert_eq!(stats.udp4_announces_handled(), 1); } #[tokio::test] @@ -179,7 +140,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_scrapes_handled, 1); + assert_eq!(stats.udp4_scrapes_handled(), 1); } #[tokio::test] @@ -204,7 +165,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_connections_handled, 1); + assert_eq!(stats.udp6_connections_handled(), 1); } #[tokio::test] @@ -231,7 +192,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_announces_handled, 1); + assert_eq!(stats.udp6_announces_handled(), 1); } #[tokio::test] @@ -256,6 +217,6 @@ mod tests { let stats = stats_repository.get_stats().await; - 
assert_eq!(stats.udp6_scrapes_handled, 1); + assert_eq!(stats.udp6_scrapes_handled(), 1); } } diff --git a/packages/udp-tracker-core/src/statistics/metrics.rs b/packages/udp-tracker-core/src/statistics/metrics.rs index e6ff8d5f6..57838c66f 100644 --- a/packages/udp-tracker-core/src/statistics/metrics.rs +++ b/packages/udp-tracker-core/src/statistics/metrics.rs @@ -1,37 +1,15 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::aggregate::Sum; use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; -/// Metrics collected by the tracker. -/// -/// - Number of connections handled -/// - Number of `announce` requests handled -/// - Number of `scrape` request handled -/// -/// These metrics are collected for each connection type: UDP and HTTP -/// and also for each IP version used by the peers: IPv4 and IPv6. +use crate::statistics::UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; + #[derive(Debug, PartialEq, Default, Serialize)] pub struct Metrics { - /// Total number of UDP (UDP tracker) connections from IPv4 peers. - pub udp4_connections_handled: u64, - - /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. - pub udp4_announces_handled: u64, - - /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. - pub udp4_scrapes_handled: u64, - - /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. - pub udp6_connections_handled: u64, - - /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. - pub udp6_announces_handled: u64, - - /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. - pub udp6_scrapes_handled: u64, - /// A collection of metrics. 
pub metric_collection: MetricCollection, } @@ -64,3 +42,89 @@ impl Metrics { self.metric_collection.set_gauge(metric_name, labels, value, now) } } + +impl Metrics { + /// Total number of UDP (UDP tracker) connections from IPv4 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_connections_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "connect")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. 
+ #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_connections_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64 + } +} diff --git a/packages/udp-tracker-core/src/statistics/repository.rs b/packages/udp-tracker-core/src/statistics/repository.rs index c68fa14f7..ceee0e369 100644 --- a/packages/udp-tracker-core/src/statistics/repository.rs +++ b/packages/udp-tracker-core/src/statistics/repository.rs @@ -33,42 +33,6 @@ impl Repository { self.stats.read().await } - pub async fn increase_udp4_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_connections_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_scrapes(&self) { - 
let mut stats_lock = self.stats.write().await; - stats_lock.udp4_scrapes_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_connections_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_scrapes_handled += 1; - drop(stats_lock); - } - - /// # Errors /// /// This function will return an error if the metric collection fails to diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index 24d25a25c..18a80bad1 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -69,15 +69,6 @@ pub async fn get_metrics( TrackerMetrics { torrents_metrics, protocol_metrics: Metrics { - // UDPv4 - udp4_connections_handled: stats.udp4_connections_handled, - udp4_announces_handled: stats.udp4_announces_handled, - udp4_scrapes_handled: stats.udp4_scrapes_handled, - // UDPv6 - udp6_connections_handled: stats.udp6_connections_handled, - udp6_announces_handled: stats.udp6_announces_handled, - udp6_scrapes_handled: stats.udp6_scrapes_handled, - - // Extendable metrics metric_collection: stats.metric_collection.clone(), }, } From f008a0a618cbbf221c6442fb32623b23157bb403 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 16:50:26 +0100 Subject: [PATCH 170/247] fix: test for request counters in http-tracker-core The IP family related to the counter (inet or inet6) depends on the server binding IP. If the server is listening on an inet6 IP, then inet6 related counters should be increased. 
--- packages/http-tracker-core/src/statistics/event/handler.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index a1d8d5fc2..37c7a26b5 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -123,7 +123,7 @@ mod tests { Event::TcpAnnounce { connection: ConnectionContext::new( RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 7070)).unwrap(), ), info_hash: sample_info_hash(), announcement: peer, @@ -151,7 +151,7 @@ mod tests { ))), Some(8080), ), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 7070)).unwrap(), ), }, &stats_repository, From 6183eba1a2bf42e7198350f2b205e76a682d9d52 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 17:19:53 +0100 Subject: [PATCH 171/247] refactor: [#1581] remove non-labeled metrics in udp-tracker-server pkg --- .../src/statistics/services.rs | 248 +++------------ .../src/statistics/event/handler/error.rs | 14 +- .../event/handler/request_aborted.rs | 8 +- .../event/handler/request_accepted.rs | 41 +-- .../event/handler/request_banned.rs | 8 +- .../event/handler/request_received.rs | 13 +- .../statistics/event/handler/response_sent.rs | 14 +- .../src/statistics/metrics.rs | 294 +++++++++++++++--- .../src/statistics/repository.rs | 158 ++-------- .../src/statistics/services.rs | 37 +-- .../tests/server/contract.rs | 4 +- 11 files changed, 334 insertions(+), 505 deletions(-) diff --git 
a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 60c4a8ebd..e30febf00 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -4,16 +4,8 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; -use torrust_tracker_metrics::label::LabelSet; -use torrust_tracker_metrics::metric_collection::aggregate::Sum; use torrust_tracker_metrics::metric_collection::MetricCollection; -use torrust_tracker_metrics::metric_name; -use torrust_udp_tracker_server::statistics::{ - self as udp_server_statistics, UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, - UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, - UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, - UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, -}; +use torrust_udp_tracker_server::statistics::{self as udp_server_statistics}; use super::metrics::TorrentsMetrics; use crate::statistics::metrics::ProtocolMetrics; @@ -109,26 +101,26 @@ async fn get_protocol_metrics( tcp6_announces_handled: http_stats.tcp6_announces_handled(), tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled(), // UDP - udp_requests_aborted: udp_server_stats.udp_requests_aborted, - udp_requests_banned: udp_server_stats.udp_requests_banned, + udp_requests_aborted: udp_server_stats.udp_requests_aborted(), + udp_requests_banned: udp_server_stats.udp_requests_banned(), udp_banned_ips_total: udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns, - udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns, - 
udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns, + udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns(), + udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns(), + udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns(), // UDPv4 - udp4_requests: udp_server_stats.udp4_requests, - udp4_connections_handled: udp_server_stats.udp4_connections_handled, - udp4_announces_handled: udp_server_stats.udp4_announces_handled, - udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled, - udp4_responses: udp_server_stats.udp4_responses, - udp4_errors_handled: udp_server_stats.udp4_errors_handled, + udp4_requests: udp_server_stats.udp4_requests(), + udp4_connections_handled: udp_server_stats.udp4_connections_handled(), + udp4_announces_handled: udp_server_stats.udp4_announces_handled(), + udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled(), + udp4_responses: udp_server_stats.udp4_responses(), + udp4_errors_handled: udp_server_stats.udp4_errors_handled(), // UDPv6 - udp6_requests: udp_server_stats.udp6_requests, - udp6_connections_handled: udp_server_stats.udp6_connections_handled, - udp6_announces_handled: udp_server_stats.udp6_announces_handled, - udp6_scrapes_handled: udp_server_stats.udp6_scrapes_handled, - udp6_responses: udp_server_stats.udp6_responses, - udp6_errors_handled: udp_server_stats.udp6_errors_handled, + udp6_requests: udp_server_stats.udp6_requests(), + udp6_connections_handled: udp_server_stats.udp6_connections_handled(), + udp6_announces_handled: udp_server_stats.udp6_announces_handled(), + udp6_scrapes_handled: udp_server_stats.udp6_scrapes_handled(), + udp6_responses: udp_server_stats.udp6_responses(), + udp6_errors_handled: udp_server_stats.udp6_errors_handled(), } } @@ -165,198 +157,30 @@ async fn get_protocol_metrics_from_labeled_metrics( // UDP - #[allow(clippy::cast_sign_loss)] - 
#[allow(clippy::cast_possible_truncation)] - let udp_requests_aborted = udp_server_stats - .metric_collection - .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &LabelSet::empty()) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp_requests_banned = udp_server_stats - .metric_collection - .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &LabelSet::empty()) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp_banned_ips_total = udp_server_stats - .metric_collection - .sum(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &LabelSet::empty()) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp_avg_connect_processing_time_ns = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &[("request_kind", "connect")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp_avg_announce_processing_time_ns = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &[("request_kind", "announce")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp_avg_scrape_processing_time_ns = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &[("request_kind", "scrape")].into(), - ) - .unwrap_or_default() - .value() as u64; + let udp_requests_aborted = udp_server_stats.udp_requests_aborted(); + let udp_requests_banned = udp_server_stats.udp_requests_banned(); + let udp_banned_ips_total = udp_server_stats.udp_banned_ips_total(); + let 
udp_avg_connect_processing_time_ns = udp_server_stats.udp_avg_connect_processing_time_ns(); + let udp_avg_announce_processing_time_ns = udp_server_stats.udp_avg_announce_processing_time_ns(); + let udp_avg_scrape_processing_time_ns = udp_server_stats.udp_avg_scrape_processing_time_ns(); // UDPv4 - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp4_requests = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), - &[("server_binding_address_ip_family", "inet")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp4_connections_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), - &[("server_binding_address_ip_family", "inet"), ("request_kind", "connect")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp4_announces_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), - &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp4_scrapes_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), - &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp4_responses = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), - &[("server_binding_address_ip_family", "inet")].into(), - ) - .unwrap_or_default() - .value() as u64; - - 
#[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp4_errors_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), - &[("server_binding_address_ip_family", "inet")].into(), - ) - .unwrap_or_default() - .value() as u64; + let udp4_requests = udp_server_stats.udp4_requests(); + let udp4_connections_handled = udp_server_stats.udp4_connections_handled(); + let udp4_announces_handled = udp_server_stats.udp4_announces_handled(); + let udp4_scrapes_handled = udp_server_stats.udp4_scrapes_handled(); + let udp4_responses = udp_server_stats.udp4_responses(); + let udp4_errors_handled = udp_server_stats.udp4_errors_handled(); // UDPv6 - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp6_requests = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), - &[("server_binding_address_ip_family", "inet6")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp6_connections_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), - &[("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp6_announces_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), - &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp6_scrapes_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), - 
&[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp6_responses = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), - &[("server_binding_address_ip_family", "inet6")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp6_errors_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), - &[("server_binding_address_ip_family", "inet6")].into(), - ) - .unwrap_or_default() - .value() as u64; + let udp6_requests = udp_server_stats.udp6_requests(); + let udp6_connections_handled = udp_server_stats.udp6_connections_handled(); + let udp6_announces_handled = udp_server_stats.udp6_announces_handled(); + let udp6_scrapes_handled = udp_server_stats.udp6_scrapes_handled(); + let udp6_responses = udp_server_stats.udp6_responses(); + let udp6_errors_handled = udp_server_stats.udp6_errors_handled(); // For backward compatibility we keep the `tcp4_connections_handled` and // `tcp6_connections_handled` metrics. 
They don't make sense for the HTTP diff --git a/packages/udp-tracker-server/src/statistics/event/handler/error.rs b/packages/udp-tracker-server/src/statistics/event/handler/error.rs index 7bde032fe..d83a0584d 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/error.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/error.rs @@ -14,21 +14,9 @@ pub async fn handle_event( repository: &Repository, now: DurationSinceUnixEpoch, ) { - update_global_fixed_metrics(&connection_context, repository).await; update_extendable_metrics(&connection_context, opt_udp_request_kind, error_kind, repository, now).await; } -async fn update_global_fixed_metrics(connection_context: &ConnectionContext, repository: &Repository) { - match connection_context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - repository.increase_udp4_errors().await; - } - std::net::IpAddr::V6(_) => { - repository.increase_udp6_errors().await; - } - } -} - async fn update_extendable_metrics( connection_context: &ConnectionContext, opt_udp_request_kind: Option, @@ -149,6 +137,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_errors_handled, 1); + assert_eq!(stats.udp4_errors_handled(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs index fc701df75..19e410d5e 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs @@ -7,10 +7,6 @@ use crate::statistics::repository::Repository; use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL; pub async fn handle_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { - // Global fixed metrics - stats_repository.increase_udp_requests_aborted().await; - - // Extendable metrics match stats_repository 
.increase_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), @@ -58,7 +54,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_aborted, 1); + assert_eq!(stats.udp_requests_aborted(), 1); } #[tokio::test] @@ -81,6 +77,6 @@ mod tests { ) .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_aborted, 1); + assert_eq!(stats.udp_requests_aborted(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs index 37b668227..af92636df 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs @@ -12,35 +12,6 @@ pub async fn handle_event( stats_repository: &Repository, now: DurationSinceUnixEpoch, ) { - // Global fixed metrics - match kind { - UdpRequestKind::Connect => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_connections().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_connections().await; - } - }, - UdpRequestKind::Announce { .. 
} => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_announces().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_announces().await; - } - }, - UdpRequestKind::Scrape => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_scrapes().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_scrapes().await; - } - }, - } - - // Extendable metrics let mut label_set = LabelSet::from(context); label_set.upsert(label_name!("request_kind"), LabelValue::new(&kind.to_string())); match stats_repository @@ -90,7 +61,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_connections_handled, 1); + assert_eq!(stats.udp4_connections_handled(), 1); } #[tokio::test] @@ -118,7 +89,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_announces_handled, 1); + assert_eq!(stats.udp4_announces_handled(), 1); } #[tokio::test] @@ -144,7 +115,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_scrapes_handled, 1); + assert_eq!(stats.udp4_scrapes_handled(), 1); } #[tokio::test] @@ -170,7 +141,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_connections_handled, 1); + assert_eq!(stats.udp6_connections_handled(), 1); } #[tokio::test] @@ -198,7 +169,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_announces_handled, 1); + assert_eq!(stats.udp6_announces_handled(), 1); } #[tokio::test] @@ -224,6 +195,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_scrapes_handled, 1); + assert_eq!(stats.udp6_scrapes_handled(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs index ce6e179a3..8badfa137 100644 --- 
a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs @@ -7,10 +7,6 @@ use crate::statistics::repository::Repository; use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL; pub async fn handle_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { - // Global fixed metrics - stats_repository.increase_udp_requests_banned().await; - - // Extendable metrics match stats_repository .increase_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), @@ -58,7 +54,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_banned, 1); + assert_eq!(stats.udp_requests_banned(), 1); } #[tokio::test] @@ -81,6 +77,6 @@ mod tests { ) .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_banned, 1); + assert_eq!(stats.udp_requests_banned(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs index 89f306f6a..eced5a215 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs @@ -7,17 +7,6 @@ use crate::statistics::repository::Repository; use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL; pub async fn handle_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_requests().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_requests().await; - } - } - - // Extendable metrics match stats_repository .increase_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), @@ -65,6 +54,6 @@ mod tests { 
let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_requests, 1); + assert_eq!(stats.udp4_requests(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index 4e167a10e..7e05e483b 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -13,16 +13,6 @@ pub async fn handle_event( stats_repository: &Repository, now: DurationSinceUnixEpoch, ) { - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_responses().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_responses().await; - } - } - let (result_label_value, kind_label_value) = match kind { UdpResponseKind::Ok { req_kind } => match req_kind { UdpRequestKind::Connect => { @@ -145,7 +135,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_responses, 1); + assert_eq!(stats.udp4_responses(), 1); } #[tokio::test] @@ -176,6 +166,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_responses, 1); + assert_eq!(stats.udp6_responses(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index ac6250872..8eba248d2 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -1,96 +1,296 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::aggregate::Sum; use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; +use 
crate::statistics::{ + UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, + UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, + UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, +}; + /// Metrics collected by the UDP tracker server. #[derive(Debug, PartialEq, Default, Serialize)] pub struct Metrics { + /// A collection of metrics. + pub metric_collection: MetricCollection, +} + +impl Metrics { + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn increase_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increment_counter(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn set_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_gauge(metric_name, labels, value, now) + } +} + +impl Metrics { // UDP /// Total number of UDP (UDP tracker) requests aborted. - pub udp_requests_aborted: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_requests_aborted(&self) -> u64 { + self.metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) requests banned. 
- pub udp_requests_banned: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_requests_banned(&self) -> u64 { + self.metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() + .value() as u64 + } /// Total number of banned IPs. - pub udp_banned_ips_total: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_banned_ips_total(&self) -> u64 { + self.metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() + .value() as u64 + } /// Average rounded time spent processing UDP connect requests. - pub udp_avg_connect_processing_time_ns: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_connect_processing_time_ns(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "connect")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Average rounded time spent processing UDP announce requests. - pub udp_avg_announce_processing_time_ns: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_announce_processing_time_ns(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Average rounded time spent processing UDP scrape requests. 
- pub udp_avg_scrape_processing_time_ns: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_scrape_processing_time_ns(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64 + } // UDPv4 /// Total number of UDP (UDP tracker) requests from IPv4 peers. - pub udp4_requests: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_requests(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) connections from IPv4 peers. - pub udp4_connections_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_connections_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "connect")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. - pub udp4_announces_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. 
- pub udp4_scrapes_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) responses from IPv4 peers. - pub udp4_responses: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_responses(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. - pub udp4_errors_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_errors_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() + .value() as u64 + } // UDPv6 /// Total number of UDP (UDP tracker) requests from IPv6 peers. - pub udp6_requests: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_requests(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. 
- pub udp6_connections_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_connections_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. - pub udp6_announces_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. - pub udp6_scrapes_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) responses from IPv6 peers. - pub udp6_responses: u64, - - /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. - pub udp6_errors_handled: u64, - - /// A collection of metrics. - pub metric_collection: MetricCollection, -} - -impl Metrics { - /// # Errors - /// - /// Returns an error if the metric does not exist and it cannot be created. 
- pub fn increase_counter( - &mut self, - metric_name: &MetricName, - labels: &LabelSet, - now: DurationSinceUnixEpoch, - ) -> Result<(), Error> { - self.metric_collection.increment_counter(metric_name, labels, now) + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_responses(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() + .value() as u64 } - /// # Errors - /// - /// Returns an error if the metric does not exist and it cannot be created. - pub fn set_gauge( - &mut self, - metric_name: &MetricName, - labels: &LabelSet, - value: f64, - now: DurationSinceUnixEpoch, - ) -> Result<(), Error> { - self.metric_collection.set_gauge(metric_name, labels, value, now) + /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_errors_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() + .value() as u64 } } diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 1a1db89c7..1851b78a8 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -34,70 +34,59 @@ impl Repository { self.stats.read().await } - pub async fn increase_udp_requests_aborted(&self) { + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increase the counter. 
+ pub async fn increase_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { let mut stats_lock = self.stats.write().await; - stats_lock.udp_requests_aborted += 1; - drop(stats_lock); - } - pub async fn increase_udp_requests_banned(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp_requests_banned += 1; - drop(stats_lock); - } + let result = stats_lock.increase_counter(metric_name, labels, now); - pub async fn increase_udp4_requests(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_requests += 1; drop(stats_lock); - } - pub async fn increase_udp4_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_connections_handled += 1; - drop(stats_lock); + result } - pub async fn increase_udp4_announces(&self) { + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increase the counter. + pub async fn set_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { let mut stats_lock = self.stats.write().await; - stats_lock.udp4_announces_handled += 1; - drop(stats_lock); - } - pub async fn increase_udp4_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_scrapes_handled += 1; - drop(stats_lock); - } + let result = stats_lock.set_gauge(metric_name, labels, value, now); - pub async fn increase_udp4_responses(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_responses += 1; drop(stats_lock); - } - pub async fn increase_udp4_errors(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_errors_handled += 1; - drop(stats_lock); + result } #[allow(clippy::cast_precision_loss)] #[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] pub async fn recalculate_udp_avg_connect_processing_time_ns(&self, 
req_processing_time: Duration) -> f64 { - let mut stats_lock = self.stats.write().await; + let stats_lock = self.stats.write().await; let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_connections_handled = (stats_lock.udp4_connections_handled + stats_lock.udp6_connections_handled) as f64; + let udp_connections_handled = (stats_lock.udp4_connections_handled() + stats_lock.udp6_connections_handled()) as f64; - let previous_avg = stats_lock.udp_avg_connect_processing_time_ns; + let previous_avg = stats_lock.udp_avg_connect_processing_time_ns(); // Moving average: https://en.wikipedia.org/wiki/Moving_average let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled; - stats_lock.udp_avg_connect_processing_time_ns = new_avg.ceil() as u64; - drop(stats_lock); new_avg @@ -107,19 +96,17 @@ impl Repository { #[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] pub async fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) -> f64 { - let mut stats_lock = self.stats.write().await; + let stats_lock = self.stats.write().await; let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_announces_handled = (stats_lock.udp4_announces_handled + stats_lock.udp6_announces_handled) as f64; + let udp_announces_handled = (stats_lock.udp4_announces_handled() + stats_lock.udp6_announces_handled()) as f64; - let previous_avg = stats_lock.udp_avg_announce_processing_time_ns; + let previous_avg = stats_lock.udp_avg_announce_processing_time_ns(); // Moving average: https://en.wikipedia.org/wiki/Moving_average let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled; - stats_lock.udp_avg_announce_processing_time_ns = new_avg.ceil() as u64; - drop(stats_lock); new_avg @@ -129,95 +116,18 @@ impl Repository { #[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] pub async fn 
recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) -> f64 { - let mut stats_lock = self.stats.write().await; + let stats_lock = self.stats.write().await; let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_scrapes_handled = (stats_lock.udp4_scrapes_handled + stats_lock.udp6_scrapes_handled) as f64; + let udp_scrapes_handled = (stats_lock.udp4_scrapes_handled() + stats_lock.udp6_scrapes_handled()) as f64; - let previous_avg = stats_lock.udp_avg_scrape_processing_time_ns; + let previous_avg = stats_lock.udp_avg_scrape_processing_time_ns(); // Moving average: https://en.wikipedia.org/wiki/Moving_average let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled; - stats_lock.udp_avg_scrape_processing_time_ns = new_avg.ceil() as u64; - drop(stats_lock); new_avg } - - pub async fn increase_udp6_requests(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_requests += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_connections_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_scrapes_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_responses(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_responses += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_errors(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_errors_handled += 1; - drop(stats_lock); - } - - /// # Errors - /// - /// This function will return an error if the metric collection fails to - /// increase the counter. 
- pub async fn increase_counter( - &self, - metric_name: &MetricName, - labels: &LabelSet, - now: DurationSinceUnixEpoch, - ) -> Result<(), Error> { - let mut stats_lock = self.stats.write().await; - - let result = stats_lock.increase_counter(metric_name, labels, now); - - drop(stats_lock); - - result - } - - /// # Errors - /// - /// This function will return an error if the metric collection fails to - /// increase the counter. - pub async fn set_gauge( - &self, - metric_name: &MetricName, - labels: &LabelSet, - value: f64, - now: DurationSinceUnixEpoch, - ) -> Result<(), Error> { - let mut stats_lock = self.stats.write().await; - - let result = stats_lock.set_gauge(metric_name, labels, value, now); - - drop(stats_lock); - - result - } } diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index e6e5a28f3..0eac01270 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -39,8 +39,6 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_udp_tracker_core::services::banning::BanService; -use tokio::sync::RwLock; use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::metrics::Metrics; @@ -63,38 +61,14 @@ pub struct TrackerMetrics { /// It returns all the [`TrackerMetrics`] pub async fn get_metrics( in_memory_torrent_repository: Arc, - ban_service: Arc>, stats_repository: Arc, ) -> TrackerMetrics { let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let stats = stats_repository.get_stats().await; - let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); TrackerMetrics { torrents_metrics, protocol_metrics: Metrics { - // UDP - udp_requests_aborted: stats.udp_requests_aborted, - udp_requests_banned: stats.udp_requests_banned, - udp_banned_ips_total: 
udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: stats.udp_avg_connect_processing_time_ns, - udp_avg_announce_processing_time_ns: stats.udp_avg_announce_processing_time_ns, - udp_avg_scrape_processing_time_ns: stats.udp_avg_scrape_processing_time_ns, - // UDPv4 - udp4_requests: stats.udp4_requests, - udp4_connections_handled: stats.udp4_connections_handled, - udp4_announces_handled: stats.udp4_announces_handled, - udp4_scrapes_handled: stats.udp4_scrapes_handled, - udp4_responses: stats.udp4_responses, - udp4_errors_handled: stats.udp4_errors_handled, - // UDPv6 - udp6_requests: stats.udp6_requests, - udp6_connections_handled: stats.udp6_connections_handled, - udp6_announces_handled: stats.udp6_announces_handled, - udp6_scrapes_handled: stats.udp6_scrapes_handled, - udp6_responses: stats.udp6_responses, - udp6_errors_handled: stats.udp6_errors_handled, - // Extendable metrics metric_collection: stats.metric_collection.clone(), }, } @@ -106,9 +80,6 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; - use bittorrent_udp_tracker_core::services::banning::BanService; - use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; - use tokio::sync::RwLock; use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::describe_metrics; @@ -118,16 +89,10 @@ mod tests { #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let stats_repository = Arc::new(Repository::new()); - let tracker_metrics = get_metrics( - in_memory_torrent_repository.clone(), - ban_service.clone(), - stats_repository.clone(), - ) - .await; + let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), stats_repository.clone()).await; 
assert_eq!( tracker_metrics, diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index 0d9540289..2745f3407 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -273,7 +273,7 @@ mod receiving_an_announce_request { .stats_repository .get_stats() .await - .udp_requests_banned; + .udp_requests_banned(); // This should return a timeout error match client.send(announce_request.into()).await { @@ -289,7 +289,7 @@ mod receiving_an_announce_request { .stats_repository .get_stats() .await - .udp_requests_banned; + .udp_requests_banned(); let udp_banned_ips_total_after = ban_service.read().await.get_banned_ips_total(); // UDP counter for banned requests should be increased by 1 From a5524825452e82a37c25462fd101d6d2023a36bb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 17:28:22 +0100 Subject: [PATCH 172/247] refactor: [#1581] finished. Global metrics in API loaded from labeled metrics --- Cargo.lock | 1 - .../src/v1/context/stats/handlers.rs | 10 +-- .../src/v1/context/stats/routes.rs | 1 - packages/rest-tracker-api-core/Cargo.toml | 1 - .../src/statistics/services.rs | 81 +------------------ 5 files changed, 4 insertions(+), 90 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6f8215bbf..269f7a3a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4668,7 +4668,6 @@ dependencies = [ "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", "torrust-udp-tracker-server", - "tracing", ] [[package]] diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index b907b861a..1b1f670a0 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -41,21 +41,13 @@ pub struct QueryParams { pub async fn 
get_stats_handler( State(state): State<( Arc, - Arc>, Arc, Arc, Arc, )>, params: Query, ) -> Response { - let metrics = get_metrics( - state.0.clone(), - state.1.clone(), - state.2.clone(), - state.3.clone(), - state.4.clone(), - ) - .await; + let metrics = get_metrics(state.0.clone(), state.1.clone(), state.2.clone(), state.3.clone()).await; match params.0.format { Some(format) => match format { diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index c2a1466e0..2bf3776fd 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -18,7 +18,6 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc, - ban_service: Arc>, tracker_core_stats_repository: Arc, http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { - let protocol_metrics_from_global_metrics = get_protocol_metrics( - ban_service.clone(), - http_stats_repository.clone(), - udp_server_stats_repository.clone(), - ) - .await; - - let protocol_metrics_from_labeled_metrics = - get_protocol_metrics_from_labeled_metrics(http_stats_repository.clone(), udp_server_stats_repository.clone()).await; - - // todo: - // We keep both metrics until we deploy to production and we can - // ensure that the protocol metrics from labeled metrics are correct. - // After that we can remove the `get_protocol_metrics` function and - // use only the `get_protocol_metrics_from_labeled_metrics` function. - // And also remove the code in repositories to generate the global metrics. 
- let protocol_metrics = if protocol_metrics_from_global_metrics == protocol_metrics_from_labeled_metrics { - protocol_metrics_from_labeled_metrics - } else { - tracing::warn!("The protocol metrics from global metrics and labeled metrics are different"); - tracing::warn!("Global metrics: {:?}", protocol_metrics_from_global_metrics); - tracing::warn!("Labeled metrics: {:?}", protocol_metrics_from_labeled_metrics); - protocol_metrics_from_global_metrics - }; - TrackerMetrics { torrents_metrics: get_torrents_metrics(in_memory_torrent_repository, tracker_core_stats_repository).await, - protocol_metrics, + protocol_metrics: get_protocol_metrics(http_stats_repository.clone(), udp_server_stats_repository.clone()).await, } } @@ -76,57 +50,9 @@ async fn get_torrents_metrics( torrents_metrics } -#[allow(deprecated)] -async fn get_protocol_metrics( - ban_service: Arc>, - http_stats_repository: Arc, - udp_server_stats_repository: Arc, -) -> ProtocolMetrics { - let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); - let http_stats = http_stats_repository.get_stats().await; - let udp_server_stats = udp_server_stats_repository.get_stats().await; - - // For backward compatibility we keep the `tcp4_connections_handled` and - // `tcp6_connections_handled` metrics. They don't make sense for the HTTP - // tracker, but we keep them for now. In new major versions we should remove - // them. 
- - ProtocolMetrics { - // TCPv4 - tcp4_connections_handled: http_stats.tcp4_announces_handled() + http_stats.tcp4_scrapes_handled(), - tcp4_announces_handled: http_stats.tcp4_announces_handled(), - tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled(), - // TCPv6 - tcp6_connections_handled: http_stats.tcp6_announces_handled() + http_stats.tcp6_scrapes_handled(), - tcp6_announces_handled: http_stats.tcp6_announces_handled(), - tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled(), - // UDP - udp_requests_aborted: udp_server_stats.udp_requests_aborted(), - udp_requests_banned: udp_server_stats.udp_requests_banned(), - udp_banned_ips_total: udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns(), - udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns(), - udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns(), - // UDPv4 - udp4_requests: udp_server_stats.udp4_requests(), - udp4_connections_handled: udp_server_stats.udp4_connections_handled(), - udp4_announces_handled: udp_server_stats.udp4_announces_handled(), - udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled(), - udp4_responses: udp_server_stats.udp4_responses(), - udp4_errors_handled: udp_server_stats.udp4_errors_handled(), - // UDPv6 - udp6_requests: udp_server_stats.udp6_requests(), - udp6_connections_handled: udp_server_stats.udp6_connections_handled(), - udp6_announces_handled: udp_server_stats.udp6_announces_handled(), - udp6_scrapes_handled: udp_server_stats.udp6_scrapes_handled(), - udp6_responses: udp_server_stats.udp6_responses(), - udp6_errors_handled: udp_server_stats.udp6_errors_handled(), - } -} - #[allow(deprecated)] #[allow(clippy::too_many_lines)] -async fn get_protocol_metrics_from_labeled_metrics( +async fn get_protocol_metrics( http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> ProtocolMetrics { @@ -307,7 +233,7 @@ mod tests { let 
tracker_core_container = TrackerCoreContainer::initialize_from(&core_config, &swarm_coordination_registry_container.clone()); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let _ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); // HTTP core stats let http_core_broadcaster = Broadcaster::default(); @@ -326,7 +252,6 @@ mod tests { let tracker_metrics = get_metrics( tracker_core_container.in_memory_torrent_repository.clone(), - ban_service.clone(), tracker_core_container.stats_repository.clone(), http_stats_repository.clone(), udp_server_stats_repository.clone(), From 0d9f88337a5117fba523e7f2cb84a70d46af9444 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jun 2025 08:06:25 +0100 Subject: [PATCH 173/247] refactor(metrics): [#1580] convert Sum trait to use associated types for mathematically correct return types - Replace AggregateValue return type with associated Output type in Sum trait - Counter metrics now return u64 (preserving integer precision) - Gauge metrics now return f64 (avoiding unnecessary wrapper type) - Update all test cases to expect primitive types instead of AggregateValue - Convert primitive results to AggregateValue at collection level for backward compatibility - Use proper floating-point comparison in gauge tests with epsilon tolerance This change ensures each aggregate function returns the mathematically appropriate type while maintaining API compatibility for metric collections. 
--- packages/metrics/src/metric/aggregate/sum.rs | 81 +++++++++---------- .../src/metric_collection/aggregate.rs | 11 ++- 2 files changed, 47 insertions(+), 45 deletions(-) diff --git a/packages/metrics/src/metric/aggregate/sum.rs b/packages/metrics/src/metric/aggregate/sum.rs index f08ea7d55..30c2819b7 100644 --- a/packages/metrics/src/metric/aggregate/sum.rs +++ b/packages/metrics/src/metric/aggregate/sum.rs @@ -1,37 +1,34 @@ -use crate::aggregate::AggregateValue; use crate::counter::Counter; use crate::gauge::Gauge; use crate::label::LabelSet; use crate::metric::Metric; pub trait Sum { - fn sum(&self, label_set_criteria: &LabelSet) -> AggregateValue; + type Output; + fn sum(&self, label_set_criteria: &LabelSet) -> Self::Output; } impl Sum for Metric { - #[allow(clippy::cast_precision_loss)] - fn sum(&self, label_set_criteria: &LabelSet) -> AggregateValue { - let sum: f64 = self - .sample_collection + type Output = u64; + + fn sum(&self, label_set_criteria: &LabelSet) -> Self::Output { + self.sample_collection .iter() .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) - .map(|(_label_set, measurement)| measurement.value().primitive() as f64) - .sum(); - - sum.into() + .map(|(_label_set, measurement)| measurement.value().primitive()) + .sum() } } impl Sum for Metric { - fn sum(&self, label_set_criteria: &LabelSet) -> AggregateValue { - let sum: f64 = self - .sample_collection + type Output = f64; + + fn sum(&self, label_set_criteria: &LabelSet) -> Self::Output { + self.sample_collection .iter() .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) .map(|(_label_set, measurement)| measurement.value().primitive()) - .sum(); - - sum.into() + .sum() } } @@ -40,7 +37,6 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::aggregate::AggregateValue; use crate::counter::Counter; use crate::gauge::Gauge; use crate::label::LabelSet; @@ -83,14 +79,14 @@ mod tests { } } - fn counter_cases() -> 
Vec<(Metric, LabelSet, AggregateValue)> { + fn counter_cases() -> Vec<(Metric, LabelSet, u64)> { // (metric, label set criteria, expected_aggregate_value) vec![ // Metric with one sample without label set ( MetricBuilder::default().with_sample(1.into(), &LabelSet::empty()).build(), LabelSet::empty(), - 1.0.into(), + 1, ), // Metric with one sample with a label set ( @@ -98,7 +94,7 @@ mod tests { .with_sample(1.into(), &[("l1", "l1_value")].into()) .build(), [("l1", "l1_value")].into(), - 1.0.into(), + 1, ), // Metric with two samples, different label sets, sum all ( @@ -107,7 +103,7 @@ mod tests { .with_sample(2.into(), &[("l2", "l2_value")].into()) .build(), LabelSet::empty(), - 3.0.into(), + 3, ), // Metric with two samples, different label sets, sum one ( @@ -116,7 +112,7 @@ mod tests { .with_sample(2.into(), &[("l2", "l2_value")].into()) .build(), [("l1", "l1_value")].into(), - 1.0.into(), + 1, ), // Metric with two samples, same label key, different label values, sum by key ( @@ -125,7 +121,7 @@ mod tests { .with_sample(2.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) .build(), [("l1", "l1_value")].into(), - 3.0.into(), + 3, ), // Metric with two samples, different label values, sum by subkey ( @@ -134,17 +130,17 @@ mod tests { .with_sample(2.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) .build(), [("la", "la_value")].into(), - 1.0.into(), + 1, ), // Edge: Metric with no samples at all - (MetricBuilder::default().build(), LabelSet::empty(), 0.0.into()), + (MetricBuilder::default().build(), LabelSet::empty(), 0), // Edge: Metric with samples but no matching labels ( MetricBuilder::default() .with_sample(5.into(), &[("foo", "bar")].into()) .build(), [("not", "present")].into(), - 0.0.into(), + 0, ), // Edge: Metric with zero value ( @@ -152,7 +148,7 @@ mod tests { .with_sample(0.into(), &[("l3", "l3_value")].into()) .build(), [("l3", "l3_value")].into(), - 0.0.into(), + 0, ), // Edge: Metric with a very large value ( @@ -160,20 +156,19 @@ 
mod tests { .with_sample(u64::MAX.into(), &LabelSet::empty()) .build(), LabelSet::empty(), - #[allow(clippy::cast_precision_loss)] - (u64::MAX as f64).into(), + u64::MAX, ), ] } - fn gauge_cases() -> Vec<(Metric, LabelSet, AggregateValue)> { + fn gauge_cases() -> Vec<(Metric, LabelSet, f64)> { // (metric, label set criteria, expected_aggregate_value) vec![ // Metric with one sample without label set ( MetricBuilder::default().with_sample(1.0.into(), &LabelSet::empty()).build(), LabelSet::empty(), - 1.0.into(), + 1.0, ), // Metric with one sample with a label set ( @@ -181,7 +176,7 @@ mod tests { .with_sample(1.0.into(), &[("l1", "l1_value")].into()) .build(), [("l1", "l1_value")].into(), - 1.0.into(), + 1.0, ), // Metric with two samples, different label sets, sum all ( @@ -190,7 +185,7 @@ mod tests { .with_sample(2.0.into(), &[("l2", "l2_value")].into()) .build(), LabelSet::empty(), - 3.0.into(), + 3.0, ), // Metric with two samples, different label sets, sum one ( @@ -199,7 +194,7 @@ mod tests { .with_sample(2.0.into(), &[("l2", "l2_value")].into()) .build(), [("l1", "l1_value")].into(), - 1.0.into(), + 1.0, ), // Metric with two samples, same label key, different label values, sum by key ( @@ -208,7 +203,7 @@ mod tests { .with_sample(2.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) .build(), [("l1", "l1_value")].into(), - 3.0.into(), + 3.0, ), // Metric with two samples, different label values, sum by subkey ( @@ -217,17 +212,17 @@ mod tests { .with_sample(2.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) .build(), [("la", "la_value")].into(), - 1.0.into(), + 1.0, ), // Edge: Metric with no samples at all - (MetricBuilder::default().build(), LabelSet::empty(), 0.0.into()), + (MetricBuilder::default().build(), LabelSet::empty(), 0.0), // Edge: Metric with samples but no matching labels ( MetricBuilder::default() .with_sample(5.0.into(), &[("foo", "bar")].into()) .build(), [("not", "present")].into(), - 0.0.into(), + 0.0, ), // Edge: 
Metric with zero value ( @@ -235,7 +230,7 @@ mod tests { .with_sample(0.0.into(), &[("l3", "l3_value")].into()) .build(), [("l3", "l3_value")].into(), - 0.0.into(), + 0.0, ), // Edge: Metric with negative values ( @@ -244,7 +239,7 @@ mod tests { .with_sample(3.0.into(), &[("l5", "l5_value")].into()) .build(), LabelSet::empty(), - 1.0.into(), + 1.0, ), // Edge: Metric with a very large value ( @@ -252,7 +247,7 @@ mod tests { .with_sample(f64::MAX.into(), &LabelSet::empty()) .build(), LabelSet::empty(), - f64::MAX.into(), + f64::MAX, ), ] } @@ -274,8 +269,8 @@ mod tests { for (idx, (metric, criteria, expected_value)) in gauge_cases().iter().enumerate() { let sum = metric.sum(criteria); - assert_eq!( - sum, *expected_value, + assert!( + (sum - expected_value).abs() <= f64::EPSILON, "at case {idx}, expected sum to be {expected_value}, got {sum}" ); } diff --git a/packages/metrics/src/metric_collection/aggregate.rs b/packages/metrics/src/metric_collection/aggregate.rs index 7fd744d92..a1afa30da 100644 --- a/packages/metrics/src/metric_collection/aggregate.rs +++ b/packages/metrics/src/metric_collection/aggregate.rs @@ -22,13 +22,20 @@ impl Sum for MetricCollection { impl Sum for MetricKindCollection { fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { - self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) + self.metrics.get(metric_name).map(|metric| { + let sum: u64 = metric.sum(label_set_criteria); + #[allow(clippy::cast_precision_loss)] + AggregateValue::new(sum as f64) + }) } } impl Sum for MetricKindCollection { fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { - self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) + self.metrics.get(metric_name).map(|metric| { + let sum: f64 = metric.sum(label_set_criteria); + AggregateValue::new(sum) + }) } } From db6b491edc2ecf219a88fa6b85c9e6de100520e1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jun 2025 
08:14:12 +0100 Subject: [PATCH 174/247] refactor(metrics): [#1580] add associated types to collection-level Sum trait - Convert collection Sum trait from fixed return type to associated Output type - MetricKindCollection now returns Option preserving integer precision - MetricKindCollection now returns Option for direct float access - MetricCollection maintains Option for backward compatibility - Simplify implementation by directly delegating to metric-level sum methods - Remove intermediate conversions in metric kind collections This completes the associated types pattern across both metric-level and collection-level Sum traits, allowing each implementation to return the most mathematically appropriate type while maintaining API compatibility. --- .../src/metric_collection/aggregate.rs | 35 +++++++++++-------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/packages/metrics/src/metric_collection/aggregate.rs b/packages/metrics/src/metric_collection/aggregate.rs index a1afa30da..8bda278d4 100644 --- a/packages/metrics/src/metric_collection/aggregate.rs +++ b/packages/metrics/src/metric_collection/aggregate.rs @@ -7,35 +7,40 @@ use crate::metric::MetricName; use crate::metric_collection::{MetricCollection, MetricKindCollection}; pub trait Sum { - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option; + type Output; + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Self::Output; } impl Sum for MetricCollection { - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { + type Output = Option; + + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Self::Output { if let Some(value) = self.counters.sum(metric_name, label_set_criteria) { - return Some(value); + #[allow(clippy::cast_precision_loss)] + return Some(AggregateValue::new(value as f64)); + } + + if let Some(value) = self.gauges.sum(metric_name, label_set_criteria) { + return 
Some(AggregateValue::new(value)); } - self.gauges.sum(metric_name, label_set_criteria) + None } } impl Sum for MetricKindCollection { - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { - self.metrics.get(metric_name).map(|metric| { - let sum: u64 = metric.sum(label_set_criteria); - #[allow(clippy::cast_precision_loss)] - AggregateValue::new(sum as f64) - }) + type Output = Option; + + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Self::Output { + self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) } } impl Sum for MetricKindCollection { - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { - self.metrics.get(metric_name).map(|metric| { - let sum: f64 = metric.sum(label_set_criteria); - AggregateValue::new(sum) - }) + type Output = Option; + + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Self::Output { + self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) } } From 00ac210a90258afc2e5ee06368bb90e9a045731d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jun 2025 08:40:03 +0100 Subject: [PATCH 175/247] refactor(metrics): [#1580] remove AggregateValue wrapper, return primitive types from aggregates - Remove AggregateValue struct and its entire module from metrics package - Simplify Sum trait in metric collections to return Option directly - Update MetricKindCollection implementations to cast counter values to f64 - Remove AggregateValue dependencies from http-tracker-core, udp-tracker-core, and udp-tracker-server - Eliminate unnecessary wrapper overhead in aggregate operations - Maintain backward compatibility by converting all aggregate results to f64 This change completes the metrics package refactoring by removing the generic AggregateValue wrapper that added no value when aggregate functions can return mathematically appropriate primitive types directly. 
--- .../src/statistics/metrics.rs | 12 +- packages/metrics/src/aggregate.rs | 143 ------------------ packages/metrics/src/lib.rs | 1 - .../src/metric_collection/aggregate.rs | 34 ++--- .../src/statistics/metrics.rs | 18 +-- .../src/statistics/metrics.rs | 54 +++---- 6 files changed, 42 insertions(+), 220 deletions(-) delete mode 100644 packages/metrics/src/aggregate.rs diff --git a/packages/http-tracker-core/src/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs index 05acea937..6aede8359 100644 --- a/packages/http-tracker-core/src/statistics/metrics.rs +++ b/packages/http-tracker-core/src/statistics/metrics.rs @@ -53,8 +53,7 @@ impl Metrics { &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. @@ -67,8 +66,7 @@ impl Metrics { &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. @@ -81,8 +79,7 @@ impl Metrics { &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. 
@@ -95,7 +92,6 @@ impl Metrics { &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } } diff --git a/packages/metrics/src/aggregate.rs b/packages/metrics/src/aggregate.rs deleted file mode 100644 index 39b760fca..000000000 --- a/packages/metrics/src/aggregate.rs +++ /dev/null @@ -1,143 +0,0 @@ -use derive_more::Display; - -#[derive(Debug, Display, Clone, Copy, PartialEq, Default)] -pub struct AggregateValue(f64); - -impl AggregateValue { - #[must_use] - pub fn new(value: f64) -> Self { - Self(value) - } - - #[must_use] - pub fn value(&self) -> f64 { - self.0 - } -} - -impl From for AggregateValue { - fn from(value: f64) -> Self { - Self(value) - } -} - -impl From for f64 { - fn from(value: AggregateValue) -> Self { - value.0 - } -} - -#[cfg(test)] -mod tests { - use approx::assert_relative_eq; - - use super::*; - - #[test] - fn it_should_be_created_with_new() { - let value = AggregateValue::new(42.5); - assert_relative_eq!(value.value(), 42.5); - } - - #[test] - fn it_should_return_the_inner_value() { - let value = AggregateValue::new(123.456); - assert_relative_eq!(value.value(), 123.456); - } - - #[test] - fn it_should_handle_zero_value() { - let value = AggregateValue::new(0.0); - assert_relative_eq!(value.value(), 0.0); - } - - #[test] - fn it_should_handle_negative_values() { - let value = AggregateValue::new(-42.5); - assert_relative_eq!(value.value(), -42.5); - } - - #[test] - fn it_should_handle_infinity() { - let value = AggregateValue::new(f64::INFINITY); - assert_relative_eq!(value.value(), f64::INFINITY); - } - - #[test] - fn it_should_handle_nan() { - let value = AggregateValue::new(f64::NAN); - assert!(value.value().is_nan()); - } - - #[test] - fn it_should_be_created_from_f64() { - let value: AggregateValue = 42.5.into(); - assert_relative_eq!(value.value(), 42.5); - } - - #[test] - fn 
it_should_convert_to_f64() { - let value = AggregateValue::new(42.5); - let f64_value: f64 = value.into(); - assert_relative_eq!(f64_value, 42.5); - } - - #[test] - fn it_should_be_displayable() { - let value = AggregateValue::new(42.5); - assert_eq!(value.to_string(), "42.5"); - } - - #[test] - fn it_should_be_debuggable() { - let value = AggregateValue::new(42.5); - let debug_string = format!("{value:?}"); - assert_eq!(debug_string, "AggregateValue(42.5)"); - } - - #[test] - fn it_should_be_cloneable() { - let value = AggregateValue::new(42.5); - let cloned_value = value; - assert_eq!(value, cloned_value); - } - - #[test] - fn it_should_be_copyable() { - let value = AggregateValue::new(42.5); - let copied_value = value; - assert_eq!(value, copied_value); - } - - #[test] - fn it_should_support_equality_comparison() { - let value1 = AggregateValue::new(42.5); - let value2 = AggregateValue::new(42.5); - let value3 = AggregateValue::new(43.0); - - assert_eq!(value1, value2); - assert_ne!(value1, value3); - } - - #[test] - fn it_should_handle_special_float_values_in_equality() { - let nan1 = AggregateValue::new(f64::NAN); - let nan2 = AggregateValue::new(f64::NAN); - let infinity = AggregateValue::new(f64::INFINITY); - let neg_infinity = AggregateValue::new(f64::NEG_INFINITY); - - // NaN is not equal to itself in IEEE 754 - assert_ne!(nan1, nan2); - assert_eq!(infinity, AggregateValue::new(f64::INFINITY)); - assert_eq!(neg_infinity, AggregateValue::new(f64::NEG_INFINITY)); - assert_ne!(infinity, neg_infinity); - } - - #[test] - fn it_should_handle_conversion_roundtrip() { - let original_value = 42.5; - let aggregate_value = AggregateValue::from(original_value); - let converted_back: f64 = aggregate_value.into(); - assert_relative_eq!(original_value, converted_back); - } -} diff --git a/packages/metrics/src/lib.rs b/packages/metrics/src/lib.rs index c53e9dd02..997cd3c8c 100644 --- a/packages/metrics/src/lib.rs +++ b/packages/metrics/src/lib.rs @@ -1,4 +1,3 @@ -pub mod 
aggregate; pub mod counter; pub mod gauge; pub mod label; diff --git a/packages/metrics/src/metric_collection/aggregate.rs b/packages/metrics/src/metric_collection/aggregate.rs index 8bda278d4..62b2ca498 100644 --- a/packages/metrics/src/metric_collection/aggregate.rs +++ b/packages/metrics/src/metric_collection/aggregate.rs @@ -1,4 +1,3 @@ -use crate::aggregate::AggregateValue; use crate::counter::Counter; use crate::gauge::Gauge; use crate::label::LabelSet; @@ -7,21 +6,17 @@ use crate::metric::MetricName; use crate::metric_collection::{MetricCollection, MetricKindCollection}; pub trait Sum { - type Output; - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Self::Output; + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option; } impl Sum for MetricCollection { - type Output = Option; - - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Self::Output { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { if let Some(value) = self.counters.sum(metric_name, label_set_criteria) { - #[allow(clippy::cast_precision_loss)] - return Some(AggregateValue::new(value as f64)); + return Some(value); } if let Some(value) = self.gauges.sum(metric_name, label_set_criteria) { - return Some(AggregateValue::new(value)); + return Some(value); } None @@ -29,17 +24,16 @@ impl Sum for MetricCollection { } impl Sum for MetricKindCollection { - type Output = Option; - - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Self::Output { - self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { + #[allow(clippy::cast_precision_loss)] + self.metrics + .get(metric_name) + .map(|metric| metric.sum(label_set_criteria) as f64) } } impl Sum for MetricKindCollection { - type Output = Option; - - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> 
Self::Output { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) } } @@ -81,10 +75,10 @@ mod tests { ) .unwrap(); - assert_eq!(collection.sum(&metric_name, &LabelSet::empty()), Some(2.0.into())); + assert_eq!(collection.sum(&metric_name, &LabelSet::empty()), Some(2.0)); assert_eq!( collection.sum(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), - Some(1.0.into()) + Some(1.0) ); } @@ -114,10 +108,10 @@ mod tests { ) .unwrap(); - assert_eq!(collection.sum(&metric_name, &LabelSet::empty()), Some(2.0.into())); + assert_eq!(collection.sum(&metric_name, &LabelSet::empty()), Some(2.0)); assert_eq!( collection.sum(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), - Some(1.0.into()) + Some(1.0) ); } } diff --git a/packages/udp-tracker-core/src/statistics/metrics.rs b/packages/udp-tracker-core/src/statistics/metrics.rs index 57838c66f..db83c1c1d 100644 --- a/packages/udp-tracker-core/src/statistics/metrics.rs +++ b/packages/udp-tracker-core/src/statistics/metrics.rs @@ -54,8 +54,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "connect")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. @@ -68,8 +67,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. 
@@ -82,8 +80,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. @@ -96,8 +93,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. @@ -110,8 +106,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. @@ -124,7 +119,6 @@ impl Metrics { &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } } diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index 8eba248d2..d3f273665 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -56,8 +56,7 @@ impl Metrics { pub fn udp_requests_aborted(&self) -> u64 { self.metric_collection .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &LabelSet::empty()) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) requests banned. 
@@ -67,8 +66,7 @@ impl Metrics { pub fn udp_requests_banned(&self) -> u64 { self.metric_collection .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &LabelSet::empty()) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of banned IPs. @@ -78,8 +76,7 @@ impl Metrics { pub fn udp_banned_ips_total(&self) -> u64 { self.metric_collection .sum(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &LabelSet::empty()) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Average rounded time spent processing UDP connect requests. @@ -92,8 +89,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), &[("request_kind", "connect")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Average rounded time spent processing UDP announce requests. @@ -106,8 +102,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), &[("request_kind", "announce")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Average rounded time spent processing UDP scrape requests. @@ -120,8 +115,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), &[("request_kind", "scrape")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } // UDPv4 @@ -135,8 +129,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) connections from IPv4 peers. 
@@ -149,8 +142,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "connect")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. @@ -163,8 +155,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. @@ -177,8 +168,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) responses from IPv4 peers. @@ -191,8 +181,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &[("server_binding_address_ip_family", "inet")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. @@ -205,8 +194,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &[("server_binding_address_ip_family", "inet")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } // UDPv6 @@ -220,8 +208,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet6")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. 
@@ -234,8 +221,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. @@ -248,8 +234,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. @@ -262,8 +247,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) responses from IPv6 peers. @@ -276,8 +260,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &[("server_binding_address_ip_family", "inet6")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. 
@@ -290,7 +273,6 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &[("server_binding_address_ip_family", "inet6")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } } From dfd950d715f253ff4740b518564f62ec35977bdb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jun 2025 08:55:50 +0100 Subject: [PATCH 176/247] refactor(metrics): [#1580] reorganize metric collection aggregates into submodules - Move metric_collection/aggregate.rs to aggregate/sum.rs submodule - Create proper module structure for aggregate operations - Update import paths in http-tracker-core, udp-tracker-core, and udp-tracker-server - Change imports from `aggregate::Sum` to `aggregate::sum::Sum` - Maintain the same Sum trait functionality with cleaner module organization This reorganization prepares for potential future aggregate operations beyond just sum while keeping the existing Sum trait API intact. --- packages/http-tracker-core/src/statistics/metrics.rs | 2 +- packages/metrics/src/metric_collection/aggregate/mod.rs | 1 + .../src/metric_collection/{aggregate.rs => aggregate/sum.rs} | 2 +- packages/udp-tracker-core/src/statistics/metrics.rs | 2 +- packages/udp-tracker-server/src/statistics/metrics.rs | 2 +- 5 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 packages/metrics/src/metric_collection/aggregate/mod.rs rename packages/metrics/src/metric_collection/{aggregate.rs => aggregate/sum.rs} (98%) diff --git a/packages/http-tracker-core/src/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs index 6aede8359..00d09b803 100644 --- a/packages/http-tracker-core/src/statistics/metrics.rs +++ b/packages/http-tracker-core/src/statistics/metrics.rs @@ -1,7 +1,7 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; -use torrust_tracker_metrics::metric_collection::aggregate::Sum; +use 
torrust_tracker_metrics::metric_collection::aggregate::sum::Sum; use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; diff --git a/packages/metrics/src/metric_collection/aggregate/mod.rs b/packages/metrics/src/metric_collection/aggregate/mod.rs new file mode 100644 index 000000000..dce785d95 --- /dev/null +++ b/packages/metrics/src/metric_collection/aggregate/mod.rs @@ -0,0 +1 @@ +pub mod sum; diff --git a/packages/metrics/src/metric_collection/aggregate.rs b/packages/metrics/src/metric_collection/aggregate/sum.rs similarity index 98% rename from packages/metrics/src/metric_collection/aggregate.rs rename to packages/metrics/src/metric_collection/aggregate/sum.rs index 62b2ca498..3285fa8f1 100644 --- a/packages/metrics/src/metric_collection/aggregate.rs +++ b/packages/metrics/src/metric_collection/aggregate/sum.rs @@ -47,7 +47,7 @@ mod tests { use crate::label::LabelValue; use crate::label_name; - use crate::metric_collection::aggregate::Sum; + use crate::metric_collection::aggregate::sum::Sum; #[test] fn type_counter_with_two_samples() { diff --git a/packages/udp-tracker-core/src/statistics/metrics.rs b/packages/udp-tracker-core/src/statistics/metrics.rs index db83c1c1d..98906a596 100644 --- a/packages/udp-tracker-core/src/statistics/metrics.rs +++ b/packages/udp-tracker-core/src/statistics/metrics.rs @@ -1,7 +1,7 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; -use torrust_tracker_metrics::metric_collection::aggregate::Sum; +use torrust_tracker_metrics::metric_collection::aggregate::sum::Sum; use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs 
b/packages/udp-tracker-server/src/statistics/metrics.rs index d3f273665..c50966bc6 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -1,7 +1,7 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; -use torrust_tracker_metrics::metric_collection::aggregate::Sum; +use torrust_tracker_metrics::metric_collection::aggregate::sum::Sum; use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; From 7df7d367d8da85122e0423e3521065ec602ee748 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jun 2025 09:06:19 +0100 Subject: [PATCH 177/247] docs(metrics): enhance README with comprehensive documentation and examples - Add detailed overview and key features section - Include quick start guide with practical usage examples - Document architecture with core components and type system - Add comprehensive development guide with building, testing, and coverage - Include performance considerations and compatibility notes - Add contributing guidelines and related projects - Transform from basic description to full developer documentation - Update cSpell.json with new technical terms (println, serde) This provides much better onboarding for developers and users of the metrics library. --- packages/metrics/README.md | 185 +++++++++++++++++++++++++++++++++-- packages/metrics/cSpell.json | 2 + 2 files changed, 177 insertions(+), 10 deletions(-) diff --git a/packages/metrics/README.md b/packages/metrics/README.md index 885d6fa45..9f3883fba 100644 --- a/packages/metrics/README.md +++ b/packages/metrics/README.md @@ -1,37 +1,202 @@ # Torrust Tracker Metrics -A library with the metrics types used by the [Torrust Tracker](https://github.com/torrust/torrust-tracker) packages. 
+A comprehensive metrics library providing type-safe metric collection, aggregation, and Prometheus export functionality for the [Torrust Tracker](https://github.com/torrust/torrust-tracker) ecosystem. + +## Overview + +This library offers a robust metrics system designed specifically for tracking and monitoring BitTorrent tracker performance. It provides type-safe metric collection with support for labels, time-series data, and multiple export formats including Prometheus. + +## Key Features + +- **Type-Safe Metrics**: Strongly typed `Counter` and `Gauge` metrics with compile-time guarantees +- **Label Support**: Rich labeling system for multi-dimensional metrics +- **Time-Series Data**: Built-in support for timestamped samples +- **Prometheus Export**: Native Prometheus format serialization +- **Aggregation Functions**: Sum operations with mathematically appropriate return types +- **JSON Serialization**: Full serde support for all metric types +- **Memory Efficient**: Optimized data structures for high-performance scenarios + +## Quick Start + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +torrust-tracker-metrics = "3.0.0" +``` + +### Basic Usage + +```rust +use torrust_tracker_metrics::{ + metric_collection::MetricCollection, + label::{LabelSet, LabelValue}, + metric_name, label_name, +}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +// Create a metric collection +let mut metrics = MetricCollection::default(); + +// Define labels +let labels: LabelSet = [ + (label_name!("server"), LabelValue::new("tracker-01")), + (label_name!("protocol"), LabelValue::new("http")), +].into(); + +// Record metrics +let time = DurationSinceUnixEpoch::from_secs(1234567890); +metrics.increment_counter( + &metric_name!("requests_total"), + &labels, + time, +)?; + +metrics.set_gauge( + &metric_name!("active_connections"), + &labels, + 42.0, + time, +)?; + +// Export to Prometheus format +let prometheus_output = metrics.to_prometheus(); +println!("{}", 
prometheus_output); +``` + +### Metric Aggregation + +```rust +use torrust_tracker_metrics::metric_collection::aggregate::Sum; + +// Sum all counter values matching specific labels +let total_requests = metrics.sum( + &metric_name!("requests_total"), + &[("server", "tracker-01")].into(), +); + +println!("Total requests: {:?}", total_requests); +``` + +## Architecture + +### Core Components + +- **`Counter`**: Monotonically increasing integer values (u64) +- **`Gauge`**: Arbitrary floating-point values that can increase or decrease (f64) +- **`Metric`**: Generic metric container with metadata (name, description, unit) +- **`MetricCollection`**: Type-safe collection managing both counters and gauges +- **`LabelSet`**: Key-value pairs for metric dimensionality +- **`Sample`**: Timestamped metric values with associated labels + +### Type System + +The library uses Rust's type system to ensure metric safety: + +```rust +// Counter operations return u64 +let counter_sum: Option = counter_collection.sum(&name, &labels); + +// Gauge operations return f64 +let gauge_sum: Option = gauge_collection.sum(&name, &labels); + +// Mixed collections convert to f64 for compatibility +let mixed_sum: Option = metric_collection.sum(&name, &labels); +``` + +### Module Structure + +```output +src/ +├── counter.rs # Counter metric type +├── gauge.rs # Gauge metric type +├── metric/ # Generic metric container +│ ├── mod.rs +│ ├── name.rs # Metric naming +│ ├── description.rs # Metric descriptions +│ └── aggregate/ # Metric-level aggregations +├── metric_collection/ # Collection management +│ ├── mod.rs +│ └── aggregate/ # Collection-level aggregations +├── label/ # Label system +│ ├── name.rs # Label names +│ ├── value.rs # Label values +│ └── set.rs # Label collections +├── sample.rs # Timestamped values +├── sample_collection.rs # Sample management +├── prometheus.rs # Prometheus export +└── unit.rs # Measurement units +``` ## Documentation -[Crate 
documentation](https://docs.rs/torrust-tracker-metrics). +- [Crate documentation](https://docs.rs/torrust-tracker-metrics) +- [API Reference](https://docs.rs/torrust-tracker-metrics/latest/torrust_tracker_metrics/) + +## Development -## Testing +### Code Coverage -Run coverage report: +Run basic coverage report: ```console cargo llvm-cov --package torrust-tracker-metrics ``` -Generate LCOV report with `llvm-cov` (for Visual Studio Code extension): +Generate LCOV report (for IDE integration): ```console mkdir -p ./.coverage -cargo llvm-cov --package torrust-tracker-metrics --lcov --output-path=./.coverage/lcov.info +cargo llvm-cov --package torrust-tracker-metrics --lcov --output-path=./.coverage/lcov.info ``` -Generate HTML report with `llvm-cov`: +Generate detailed HTML coverage report: + +Generate detailed HTML coverage report: ```console mkdir -p ./.coverage -cargo llvm-cov --package torrust-tracker-metrics --html --output-dir ./.coverage +cargo llvm-cov --package torrust-tracker-metrics --html --output-dir ./.coverage ``` +Open the coverage report in your browser: + +```console +open ./.coverage/index.html # macOS +xdg-open ./.coverage/index.html # Linux +``` + +## Performance Considerations + +- **Memory Usage**: Metrics are stored in-memory with efficient HashMap-based collections +- **Label Cardinality**: Be mindful of label combinations as they create separate time series +- **Aggregation**: Sum operations are optimized for both single-type and mixed collections + +## Compatibility + +This library is designed to be compatible with the standard Rust [metrics](https://crates.io/crates/metrics) crate ecosystem where possible. + +## Contributing + +We welcome contributions! Please see the main [Torrust Tracker repository](https://github.com/torrust/torrust-tracker) for contribution guidelines. 
+ +### Reporting Issues + +- [Bug Reports](https://github.com/torrust/torrust-tracker/issues/new?template=bug_report.md) +- [Feature Requests](https://github.com/torrust/torrust-tracker/issues/new?template=feature_request.md) + ## Acknowledgements -We copied some parts like units or function names and signatures from the crate [metrics](https://crates.io/crates/metrics) because we wanted to make it compatible as much as possible with it. In the future, we may consider using the `metrics` crate directly instead of maintaining our own version. +This library draws inspiration from the Rust [metrics](https://crates.io/crates/metrics) crate, incorporating compatible APIs and naming conventions where possible. We may consider migrating to the standard metrics crate in future versions while maintaining our specialized functionality. + +Special thanks to the Rust metrics ecosystem contributors for establishing excellent patterns for metrics collection and export. ## License -The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). +This project is licensed under the [GNU AFFERO GENERAL PUBLIC LICENSE v3.0](./LICENSE). 
+ +## Related Projects + +- [Torrust Tracker](https://github.com/torrust/torrust-tracker) - The main BitTorrent tracker +- [metrics](https://crates.io/crates/metrics) - Standard Rust metrics facade +- [prometheus](https://crates.io/crates/prometheus) - Prometheus client library diff --git a/packages/metrics/cSpell.json b/packages/metrics/cSpell.json index 1a2c13d2e..f04cce9e3 100644 --- a/packages/metrics/cSpell.json +++ b/packages/metrics/cSpell.json @@ -6,7 +6,9 @@ "Kibibytes", "Mebibytes", "ñaca", + "println", "rstest", + "serde", "subsec", "Tebibytes", "thiserror" From d2e75e3f78f367e5f2829bbe5adfedc549fb24f5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jun 2025 10:57:11 +0100 Subject: [PATCH 178/247] refactor: [#1405] graceful shutdown for listeners Event listeners listen for the cancellation request instead of directly for the Ctrl+C signal. This will allow implementing centralized policies for shutdown and alternative conditions. --- Cargo.lock | 8 +++++ Cargo.toml | 1 + packages/axum-http-tracker-server/Cargo.toml | 1 + .../src/environment.rs | 6 ++++ .../axum-http-tracker-server/src/server.rs | 5 +++- .../src/v1/handlers/announce.rs | 6 +++- .../src/v1/handlers/scrape.rs | 5 +++- packages/events/src/shutdown.rs | 0 packages/http-tracker-core/Cargo.toml | 1 + .../http-tracker-core/benches/helpers/util.rs | 5 +++- .../src/services/announce.rs | 5 +++- .../src/statistics/event/listener.rs | 23 +++++++------- packages/rest-tracker-api-core/Cargo.toml | 1 + .../src/statistics/services.rs | 5 +++- .../swarm-coordination-registry/Cargo.toml | 1 + .../src/statistics/event/listener.rs | 27 +++++++++-------- packages/tracker-core/Cargo.toml | 1 + .../src/statistics/event/listener.rs | 20 ++++++------- .../tracker-core/tests/common/test_env.rs | 5 ++++ packages/udp-tracker-core/Cargo.toml | 1 + .../src/statistics/event/listener.rs | 22 +++++++------- packages/udp-tracker-server/Cargo.toml | 1 + .../src/banning/event/listener.rs | 22 ++++++------
.../udp-tracker-server/src/environment.rs | 9 ++++++ .../src/statistics/event/listener.rs | 22 +++++++------- src/app.rs | 12 ++++---- src/bootstrap/jobs/http_tracker_core.rs | 8 ++++- src/bootstrap/jobs/manager.rs | 30 +++++++++++++++++-- src/bootstrap/jobs/torrent_repository.rs | 8 ++++- src/bootstrap/jobs/tracker_core.rs | 8 ++++- src/bootstrap/jobs/udp_tracker_core.rs | 8 ++++- src/bootstrap/jobs/udp_tracker_server.rs | 11 +++++-- src/main.rs | 2 ++ 33 files changed, 206 insertions(+), 84 deletions(-) create mode 100644 packages/events/src/shutdown.rs diff --git a/Cargo.lock b/Cargo.lock index 269f7a3a2..b523c8b60 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -587,6 +587,7 @@ dependencies = [ "serde_json", "thiserror 2.0.12", "tokio", + "tokio-util", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-events", @@ -673,6 +674,7 @@ dependencies = [ "testcontainers", "thiserror 2.0.12", "tokio", + "tokio-util", "torrust-rest-tracker-api-client", "torrust-tracker-clock", "torrust-tracker-configuration", @@ -705,6 +707,7 @@ dependencies = [ "serde", "thiserror 2.0.12", "tokio", + "tokio-util", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-events", @@ -4565,6 +4568,7 @@ dependencies = [ "serde_bytes", "serde_repr", "tokio", + "tokio-util", "torrust-axum-server", "torrust-server-lib", "torrust-tracker-clock", @@ -4661,6 +4665,7 @@ dependencies = [ "bittorrent-tracker-core", "bittorrent-udp-tracker-core", "tokio", + "tokio-util", "torrust-tracker-configuration", "torrust-tracker-events", "torrust-tracker-metrics", @@ -4704,6 +4709,7 @@ dependencies = [ "serde_json", "thiserror 2.0.12", "tokio", + "tokio-util", "torrust-axum-health-check-api-server", "torrust-axum-http-tracker-server", "torrust-axum-rest-tracker-api-server", @@ -4851,6 +4857,7 @@ dependencies = [ "serde", "thiserror 2.0.12", "tokio", + "tokio-util", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-events", @@ -4909,6 +4916,7 @@ 
dependencies = [ "serde", "thiserror 2.0.12", "tokio", + "tokio-util", "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", diff --git a/Cargo.toml b/Cargo.toml index 976176155..dbc39bdf8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,6 +47,7 @@ serde = { version = "1", features = ["derive"] } serde_json = { version = "1", features = ["preserve_order"] } thiserror = "2.0.12" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio-util = "0.7.15" torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "packages/axum-health-check-api-server" } torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = "packages/axum-http-tracker-server" } torrust-axum-rest-tracker-api-server = { version = "3.0.0-develop", path = "packages/axum-rest-tracker-api-server" } diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index fa195489c..eb2c2cad3 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -28,6 +28,7 @@ hyper = "1" reqwest = { version = "0", features = ["json"] } serde = { version = "1", features = ["derive"] } tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio-util = "0.7.15" torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 6e58c2cac..616973a0f 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -6,6 +6,7 @@ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::container::TrackerCoreContainer; use 
futures::executor::block_on; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; @@ -21,6 +22,7 @@ pub struct Environment { pub registar: Registar, pub server: HttpServer, pub event_listener_job: Option>, + pub cancellation_token: CancellationToken, } impl Environment { @@ -59,6 +61,7 @@ impl Environment { registar: Registar::default(), server, event_listener_job: None, + cancellation_token: CancellationToken::new(), } } @@ -72,6 +75,7 @@ impl Environment { // Start the event listener let event_listener_job = run_event_listener( self.container.http_tracker_core_container.event_bus.receiver(), + self.cancellation_token.clone(), &self.container.http_tracker_core_container.stats_repository, ); @@ -87,6 +91,7 @@ impl Environment { registar: self.registar.clone(), server, event_listener_job: Some(event_listener_job), + cancellation_token: self.cancellation_token, } } } @@ -117,6 +122,7 @@ impl Environment { registar: Registar::default(), server, event_listener_job: None, + cancellation_token: self.cancellation_token, } } diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index ba0dd8c6e..2b43be0a9 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -256,6 +256,7 @@ mod tests { use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_tracker_core::container::TrackerCoreContainer; + use tokio_util::sync::CancellationToken; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; @@ -265,6 +266,8 @@ mod tests { use crate::server::{HttpServer, Launcher}; pub fn 
initialize_container(configuration: &Configuration) -> HttpTrackerCoreContainer { + let cancellation_token = CancellationToken::new(); + let core_config = Arc::new(configuration.core.clone()); let http_trackers = configuration @@ -287,7 +290,7 @@ mod tests { let http_stats_event_sender = http_stats_event_bus.sender(); if configuration.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); } let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index e21a485cf..ce718cd30 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -123,6 +123,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; @@ -149,6 +150,9 @@ mod tests { } fn initialize_core_tracker_services(config: &Configuration) -> CoreHttpTrackerServices { + let cancellation_token = CancellationToken::new(); + + // Initialize the core tracker services with the provided configuration. 
let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -175,7 +179,7 @@ mod tests { let http_stats_event_sender = http_stats_event_bus.sender(); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); } let announce_service = Arc::new(AnnounceService::new( diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index b48d6e036..bdd4378f3 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -97,6 +97,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; @@ -127,6 +128,8 @@ mod tests { } fn initialize_core_tracker_services(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { + let cancellation_token = CancellationToken::new(); + let core_config = Arc::new(config.core.clone()); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); @@ -146,7 +149,7 @@ mod tests { let http_stats_event_sender = http_stats_event_bus.sender(); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); + let _unused = 
run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); } ( diff --git a/packages/events/src/shutdown.rs b/packages/events/src/shutdown.rs new file mode 100644 index 000000000..e69de29bb diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index 45af59baa..04a6c96b6 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -23,6 +23,7 @@ futures = "0" serde = "1.0.219" thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 414d3b40e..028d7c535 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -20,6 +20,7 @@ use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; use mockall::mock; +use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_events::sender::SendError; use torrust_tracker_primitives::peer::Peer; @@ -42,6 +43,8 @@ pub fn initialize_core_tracker_services() -> (CoreTrackerServices, CoreHttpTrack } pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { + let cancellation_token = CancellationToken::new(); + let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_torrent_repository = 
Arc::new(InMemoryTorrentRepository::default()); @@ -69,7 +72,7 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( let http_stats_event_sender = http_stats_event_bus.sender(); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); } ( diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 8d12da713..08ac93f68 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -216,6 +216,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_test_helpers::configuration; @@ -236,6 +237,8 @@ mod tests { } fn initialize_core_tracker_services_with_config(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { + let cancellation_token = CancellationToken::new(); + let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); @@ -263,7 +266,7 @@ mod tests { let http_stats_event_sender = http_stats_event_bus.sender(); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); } ( diff --git 
a/packages/http-tracker-core/src/statistics/event/listener.rs b/packages/http-tracker-core/src/statistics/event/listener.rs index 6730d4c70..ff2937a59 100644 --- a/packages/http-tracker-core/src/statistics/event/listener.rs +++ b/packages/http-tracker-core/src/statistics/event/listener.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; @@ -10,29 +11,29 @@ use crate::statistics::repository::Repository; use crate::{CurrentClock, HTTP_TRACKER_LOG_TARGET}; #[must_use] -pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { +pub fn run_event_listener( + receiver: Receiver, + cancellation_token: CancellationToken, + repository: &Arc, +) -> JoinHandle<()> { let stats_repository = repository.clone(); tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting HTTP tracker core event listener"); tokio::spawn(async move { - dispatch_events(receiver, stats_repository).await; + dispatch_events(receiver, cancellation_token, stats_repository).await; tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "HTTP tracker core event listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { - let shutdown_signal = tokio::signal::ctrl_c(); - - tokio::pin!(shutdown_signal); - +async fn dispatch_events(mut receiver: Receiver, cancellation_token: CancellationToken, stats_repository: Arc) { loop { tokio::select! 
{ biased; - _ = &mut shutdown_signal => { - tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Received Ctrl+C, shutting down HTTP tracker core event listener."); + () = cancellation_token.cancelled() => { + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Received cancellation request, shutting down HTTP tracker core event listener."); break; } @@ -42,11 +43,11 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { match e { RecvError::Closed => { - tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Http core statistics receiver closed."); + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Http tracker core statistics receiver closed."); break; } RecvError::Lagged(n) => { - tracing::warn!(target: HTTP_TRACKER_LOG_TARGET, "Http core statistics receiver lagged by {} events.", n); + tracing::warn!(target: HTTP_TRACKER_LOG_TARGET, "Http tracker core statistics receiver lagged by {} events.", n); } } } diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index cc8eda903..be6d493d7 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -18,6 +18,7 @@ bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-trac bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio-util = "0.7.15" torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 44c82bfea..a8132d4fd 100644 --- 
a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -210,6 +210,7 @@ mod tests { use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; + use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; @@ -224,6 +225,8 @@ mod tests { #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { + let cancellation_token = CancellationToken::new(); + let config = tracker_configuration(); let core_config = Arc::new(config.core.clone()); @@ -244,7 +247,7 @@ mod tests { )); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); } // UDP server stats diff --git a/packages/swarm-coordination-registry/Cargo.toml b/packages/swarm-coordination-registry/Cargo.toml index 074562a47..45359ad81 100644 --- a/packages/swarm-coordination-registry/Cargo.toml +++ b/packages/swarm-coordination-registry/Cargo.toml @@ -24,6 +24,7 @@ futures = "0" serde = { version = "1.0.219", features = ["derive"] } thiserror = "2.0.12" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } diff --git a/packages/swarm-coordination-registry/src/statistics/event/listener.rs b/packages/swarm-coordination-registry/src/statistics/event/listener.rs index 
9ff707818..b578d1284 100644 --- a/packages/swarm-coordination-registry/src/statistics/event/listener.rs +++ b/packages/swarm-coordination-registry/src/statistics/event/listener.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; @@ -10,29 +11,29 @@ use crate::statistics::repository::Repository; use crate::{CurrentClock, SWARM_COORDINATION_REGISTRY_LOG_TARGET}; #[must_use] -pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { +pub fn run_event_listener( + receiver: Receiver, + cancellation_token: CancellationToken, + repository: &Arc, +) -> JoinHandle<()> { let stats_repository = repository.clone(); - tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Starting torrent repository event listener"); + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Starting swarm coordination registry event listener"); tokio::spawn(async move { - dispatch_events(receiver, stats_repository).await; + dispatch_events(receiver, cancellation_token, stats_repository).await; - tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Torrent repository listener finished"); + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Swarm coordination registry listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { - let shutdown_signal = tokio::signal::ctrl_c(); - - tokio::pin!(shutdown_signal); - +async fn dispatch_events(mut receiver: Receiver, cancellation_token: CancellationToken, stats_repository: Arc) { loop { tokio::select! 
{ biased; - _ = &mut shutdown_signal => { - tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Received Ctrl+C, shutting down torrent repository event listener."); + () = cancellation_token.cancelled() => { + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Received cancellation request, shutting down swarm coordination registry event listener."); break; } @@ -42,11 +43,11 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { match e { RecvError::Closed => { - tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Torrent repository event receiver closed."); + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Swarm coordination registry event receiver closed."); break; } RecvError::Lagged(n) => { - tracing::warn!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Torrent repository event receiver lagged by {} events.", n); + tracing::warn!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Swarm coordination registry event receiver lagged by {} events.", n); } } } diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index f04a3b89b..dfc83e58e 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -27,6 +27,7 @@ serde = { version = "1", features = ["derive"] } serde_json = { version = "1", features = ["preserve_order"] } thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index d3beaf41f..8d2d74c71 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ 
b/packages/tracker-core/src/statistics/event/listener.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; use torrust_tracker_swarm_coordination_registry::event::receiver::Receiver; @@ -13,6 +14,7 @@ use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; #[must_use] pub fn run_event_listener( receiver: Receiver, + cancellation_token: CancellationToken, repository: &Arc, db_downloads_metric_repository: &Arc, persistent_torrent_completed_stat: bool, @@ -20,37 +22,35 @@ pub fn run_event_listener( let stats_repository = repository.clone(); let db_downloads_metric_repository: Arc = db_downloads_metric_repository.clone(); - tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting torrent repository event listener"); + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting tracker core event listener"); tokio::spawn(async move { dispatch_events( receiver, + cancellation_token, stats_repository, db_downloads_metric_repository, persistent_torrent_completed_stat, ) .await; - tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository listener finished"); + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Tracker core listener finished"); }) } async fn dispatch_events( mut receiver: Receiver, + cancellation_token: CancellationToken, stats_repository: Arc, db_downloads_metric_repository: Arc, persistent_torrent_completed_stat: bool, ) { - let shutdown_signal = tokio::signal::ctrl_c(); - - tokio::pin!(shutdown_signal); - loop { tokio::select! 
{ biased; - _ = &mut shutdown_signal => { - tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Received Ctrl+C, shutting down torrent repository event listener"); + () = cancellation_token.cancelled() => { + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Received cancellation request, shutting down tracker core event listener."); break; } @@ -65,11 +65,11 @@ async fn dispatch_events( Err(e) => { match e { RecvError::Closed => { - tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository event receiver closed"); + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Tracker core event receiver closed"); break; } RecvError::Lagged(n) => { - tracing::warn!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository event receiver lagged by {} events", n); + tracing::warn!(target: TRACKER_CORE_LOG_TARGET, "Tracker core event receiver lagged by {} events", n); } } } diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index d3bc9652a..3fe0464fe 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -7,6 +7,7 @@ use bittorrent_tracker_core::announce_handler::PeersWanted; use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_tracker_core::statistics::persisted::load_persisted_metrics; use tokio::task::yield_now; +use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Core; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; @@ -66,15 +67,19 @@ impl TestEnv { async fn run_jobs(&self) { let mut jobs = vec![]; + let cancellation_token = CancellationToken::new(); let job = torrust_tracker_swarm_coordination_registry::statistics::event::listener::run_event_listener( self.swarm_coordination_registry_container.event_bus.receiver(), + cancellation_token.clone(), &self.swarm_coordination_registry_container.stats_repository, ); + jobs.push(job); let job = 
bittorrent_tracker_core::statistics::event::listener::run_event_listener( self.swarm_coordination_registry_container.event_bus.receiver(), + cancellation_token.clone(), &self.tracker_core_container.stats_repository, &self.tracker_core_container.db_downloads_metric_repository, self.tracker_core_container diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index 290c5fbfd..b3007eb80 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -28,6 +28,7 @@ rand = "0" serde = "1.0.219" thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync", "time"] } +tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } diff --git a/packages/udp-tracker-core/src/statistics/event/listener.rs b/packages/udp-tracker-core/src/statistics/event/listener.rs index 9b6f2e574..b11bcce85 100644 --- a/packages/udp-tracker-core/src/statistics/event/listener.rs +++ b/packages/udp-tracker-core/src/statistics/event/listener.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; @@ -10,28 +11,29 @@ use crate::statistics::repository::Repository; use crate::{CurrentClock, UDP_TRACKER_LOG_TARGET}; #[must_use] -pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { +pub fn run_event_listener( + receiver: Receiver, + cancellation_token: CancellationToken, + repository: &Arc, +) -> JoinHandle<()> { let stats_repository = repository.clone(); tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker core event listener"); tokio::spawn(async move { - dispatch_events(receiver, stats_repository).await; + 
dispatch_events(receiver, cancellation_token, stats_repository).await; tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker core event listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { - let shutdown_signal = tokio::signal::ctrl_c(); - tokio::pin!(shutdown_signal); - +async fn dispatch_events(mut receiver: Receiver, cancellation_token: CancellationToken, stats_repository: Arc) { loop { tokio::select! { biased; - _ = &mut shutdown_signal => { - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received Ctrl+C, shutting down UDP tracker core event listener."); + () = cancellation_token.cancelled() => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received cancellation request, shutting down UDP tracker core event listener."); break; } @@ -41,11 +43,11 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { match e { RecvError::Closed => { - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp core statistics receiver closed."); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker core statistics receiver closed."); break; } RecvError::Lagged(n) => { - tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp core statistics receiver lagged by {} events.", n); + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker core statistics receiver lagged by {} events.", n); } } } diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index c0bc94ce3..160fe58f9 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -26,6 +26,7 @@ ringbuf = "0" serde = "1.0.219" thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio-util = "0.7.15" torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = 
"../configuration" } diff --git a/packages/udp-tracker-server/src/banning/event/listener.rs b/packages/udp-tracker-server/src/banning/event/listener.rs index fee3395fa..0d579f912 100644 --- a/packages/udp-tracker-server/src/banning/event/listener.rs +++ b/packages/udp-tracker-server/src/banning/event/listener.rs @@ -4,6 +4,7 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use tokio::sync::RwLock; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; @@ -15,6 +16,7 @@ use crate::CurrentClock; #[must_use] pub fn run_event_listener( receiver: Receiver, + cancellation_token: CancellationToken, ban_service: &Arc>, repository: &Arc, ) -> JoinHandle<()> { @@ -24,22 +26,24 @@ pub fn run_event_listener( tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener (banning)"); tokio::spawn(async move { - dispatch_events(receiver, ban_service_clone, repository_clone).await; + dispatch_events(receiver, cancellation_token, ban_service_clone, repository_clone).await; tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener (banning) finished"); }) } -async fn dispatch_events(mut receiver: Receiver, ban_service: Arc>, repository: Arc) { - let shutdown_signal = tokio::signal::ctrl_c(); - tokio::pin!(shutdown_signal); - +async fn dispatch_events( + mut receiver: Receiver, + cancellation_token: CancellationToken, + ban_service: Arc>, + repository: Arc, +) { loop { tokio::select! 
{ biased; - _ = &mut shutdown_signal => { - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received Ctrl+C, shutting down UDP tracker server event listener (banning)"); + () = cancellation_token.cancelled() => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received cancellation request, shutting down UDP tracker server event listener."); break; } @@ -49,11 +53,11 @@ async fn dispatch_events(mut receiver: Receiver, ban_service: Arc { match e { RecvError::Closed => { - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp server receiver (banning) closed."); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker server receiver (banning) closed."); break; } RecvError::Lagged(n) => { - tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp server receiver (banning) lagged by {} events.", n); + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker server receiver (banning) lagged by {} events.", n); } } } diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 61b1cba63..13e18ba9b 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; @@ -25,6 +26,7 @@ where pub udp_core_event_listener_job: Option>, pub udp_server_stats_event_listener_job: Option>, pub udp_server_banning_event_listener_job: Option>, + pub cancellation_token: CancellationToken, } impl Environment { @@ -46,6 +48,7 @@ impl Environment { udp_core_event_listener_job: None, udp_server_stats_event_listener_job: None, 
udp_server_banning_event_listener_job: None, + cancellation_token: CancellationToken::new(), } } @@ -57,21 +60,25 @@ impl Environment { #[allow(dead_code)] pub async fn start(self) -> Environment { let cookie_lifetime = self.container.udp_tracker_core_container.udp_tracker_config.cookie_lifetime; + // Start the UDP tracker core event listener let udp_core_event_listener_job = Some(bittorrent_udp_tracker_core::statistics::event::listener::run_event_listener( self.container.udp_tracker_core_container.event_bus.receiver(), + self.cancellation_token.clone(), &self.container.udp_tracker_core_container.stats_repository, )); // Start the UDP tracker server event listener (statistics) let udp_server_stats_event_listener_job = Some(crate::statistics::event::listener::run_event_listener( self.container.udp_tracker_server_container.event_bus.receiver(), + self.cancellation_token.clone(), &self.container.udp_tracker_server_container.stats_repository, )); // Start the UDP tracker server event listener (banning) let udp_server_banning_event_listener_job = Some(crate::banning::event::listener::run_event_listener( self.container.udp_tracker_server_container.event_bus.receiver(), + self.cancellation_token.clone(), &self.container.udp_tracker_core_container.ban_service, &self.container.udp_tracker_server_container.stats_repository, )); @@ -95,6 +102,7 @@ impl Environment { udp_core_event_listener_job, udp_server_stats_event_listener_job, udp_server_banning_event_listener_job, + cancellation_token: self.cancellation_token, } } } @@ -150,6 +158,7 @@ impl Environment { udp_core_event_listener_job: None, udp_server_stats_event_listener_job: None, udp_server_banning_event_listener_job: None, + cancellation_token: self.cancellation_token, } } diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index ae659c15e..caaf5a2bc 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ 
b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; @@ -11,28 +12,29 @@ use crate::statistics::repository::Repository; use crate::CurrentClock; #[must_use] -pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { +pub fn run_event_listener( + receiver: Receiver, + cancellation_token: CancellationToken, + repository: &Arc, +) -> JoinHandle<()> { let repository_clone = repository.clone(); tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener"); tokio::spawn(async move { - dispatch_events(receiver, repository_clone).await; + dispatch_events(receiver, cancellation_token, repository_clone).await; tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { - let shutdown_signal = tokio::signal::ctrl_c(); - tokio::pin!(shutdown_signal); - +async fn dispatch_events(mut receiver: Receiver, cancellation_token: CancellationToken, stats_repository: Arc) { loop { tokio::select! 
{ biased; - _ = &mut shutdown_signal => { - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received Ctrl+C, shutting down UDP tracker server event listener."); + () = cancellation_token.cancelled() => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received cancellation request, shutting down UDP tracker server event listener."); break; } @@ -42,11 +44,11 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { match e { RecvError::Closed => { - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp server statistics receiver closed."); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker server statistics receiver closed."); break; } RecvError::Lagged(n) => { - tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp server statistics receiver lagged by {} events.", n); + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker server statistics receiver lagged by {} events.", n); } } } diff --git a/src/app.rs b/src/app.rs index 58d758d7f..2149a6d4c 100644 --- a/src/app.rs +++ b/src/app.rs @@ -140,28 +140,28 @@ fn start_swarm_coordination_registry_event_listener( ) { job_manager.push_opt( "swarm_coordination_registry_event_listener", - jobs::torrent_repository::start_event_listener(config, app_container), + jobs::torrent_repository::start_event_listener(config, app_container, job_manager.new_cancellation_token()), ); } fn start_tracker_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { job_manager.push_opt( "tracker_core_event_listener", - jobs::tracker_core::start_event_listener(config, app_container), + jobs::tracker_core::start_event_listener(config, app_container, job_manager.new_cancellation_token()), ); } fn start_http_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { job_manager.push_opt( "http_core_event_listener", - jobs::http_tracker_core::start_event_listener(config, app_container), + 
jobs::http_tracker_core::start_event_listener(config, app_container, job_manager.new_cancellation_token()), ); } fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { job_manager.push_opt( "udp_core_event_listener", - jobs::udp_tracker_core::start_event_listener(config, app_container), + jobs::udp_tracker_core::start_event_listener(config, app_container, job_manager.new_cancellation_token()), ); } @@ -172,14 +172,14 @@ fn start_udp_server_stats_event_listener( ) { job_manager.push_opt( "udp_server_stats_event_listener", - jobs::udp_tracker_server::start_stats_event_listener(config, app_container), + jobs::udp_tracker_server::start_stats_event_listener(config, app_container, job_manager.new_cancellation_token()), ); } fn start_udp_server_banning_event_listener(app_container: &Arc, job_manager: &mut JobManager) { job_manager.push( "udp_server_banning_event_listener", - jobs::udp_tracker_server::start_banning_event_listener(app_container), + jobs::udp_tracker_server::start_banning_event_listener(app_container, job_manager.new_cancellation_token()), ); } diff --git a/src/bootstrap/jobs/http_tracker_core.rs b/src/bootstrap/jobs/http_tracker_core.rs index 952c80b40..ab71b9a0f 100644 --- a/src/bootstrap/jobs/http_tracker_core.rs +++ b/src/bootstrap/jobs/http_tracker_core.rs @@ -1,14 +1,20 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use crate::container::AppContainer; -pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { +pub fn start_event_listener( + config: &Configuration, + app_container: &Arc, + cancellation_token: CancellationToken, +) -> Option> { if config.core.tracker_usage_statistics { let job = bittorrent_http_tracker_core::statistics::event::listener::run_event_listener( app_container.http_tracker_core_services.event_bus.receiver(), + cancellation_token, 
&app_container.http_tracker_core_services.stats_repository, ); diff --git a/src/bootstrap/jobs/manager.rs b/src/bootstrap/jobs/manager.rs index 53733844b..565cd7b73 100644 --- a/src/bootstrap/jobs/manager.rs +++ b/src/bootstrap/jobs/manager.rs @@ -2,13 +2,14 @@ use std::time::Duration; use tokio::task::JoinHandle; use tokio::time::timeout; +use tokio_util::sync::CancellationToken; use tracing::{info, warn}; /// Represents a named background job. #[derive(Debug)] pub struct Job { - pub name: String, - pub handle: JoinHandle<()>, + name: String, + handle: JoinHandle<()>, } impl Job { @@ -24,12 +25,16 @@ impl Job { #[derive(Debug, Default)] pub struct JobManager { jobs: Vec, + cancellation_token: CancellationToken, } impl JobManager { #[must_use] pub fn new() -> Self { - Self { jobs: Vec::new() } + Self { + jobs: Vec::new(), + cancellation_token: CancellationToken::new(), + } } pub fn push>(&mut self, name: N, handle: JoinHandle<()>) { @@ -42,6 +47,25 @@ impl JobManager { } } + #[must_use] + pub fn new_cancellation_token(&self) -> CancellationToken { + self.cancellation_token.clone() + } + + /// Cancels all jobs using the shared cancellation token. + /// + /// Notice that this does not cancel the jobs immediately, but rather + /// signals them to stop. The jobs themselves must handle the cancellation + /// token appropriately. + /// + /// Notice jobs might be pushed into the manager without a cancellation + /// token, so this method will not cancel those jobs. Some tasks might + /// decide to listen for CTRL+c signal directly, or implement their own + /// cancellation logic. + pub fn cancel(&self) { + self.cancellation_token.cancel(); + } + /// Waits sequentially for all jobs to complete, with a graceful timeout per /// job. 
pub async fn wait_for_all(mut self, grace_period: Duration) { diff --git a/src/bootstrap/jobs/torrent_repository.rs b/src/bootstrap/jobs/torrent_repository.rs index 44ffdf53b..e49323735 100644 --- a/src/bootstrap/jobs/torrent_repository.rs +++ b/src/bootstrap/jobs/torrent_repository.rs @@ -1,14 +1,20 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use crate::container::AppContainer; -pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { +pub fn start_event_listener( + config: &Configuration, + app_container: &Arc, + cancellation_token: CancellationToken, +) -> Option> { if config.core.tracker_usage_statistics { let job = torrust_tracker_swarm_coordination_registry::statistics::event::listener::run_event_listener( app_container.swarm_coordination_registry_container.event_bus.receiver(), + cancellation_token, &app_container.swarm_coordination_registry_container.stats_repository, ); diff --git a/src/bootstrap/jobs/tracker_core.rs b/src/bootstrap/jobs/tracker_core.rs index f2fc25ef3..d881f4cd2 100644 --- a/src/bootstrap/jobs/tracker_core.rs +++ b/src/bootstrap/jobs/tracker_core.rs @@ -1,14 +1,20 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use crate::container::AppContainer; -pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { +pub fn start_event_listener( + config: &Configuration, + app_container: &Arc, + cancellation_token: CancellationToken, +) -> Option> { if config.core.tracker_usage_statistics || config.core.tracker_policy.persistent_torrent_completed_stat { let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( app_container.swarm_coordination_registry_container.event_bus.receiver(), + cancellation_token, &app_container.tracker_core_container.stats_repository, 
&app_container.tracker_core_container.db_downloads_metric_repository, app_container diff --git a/src/bootstrap/jobs/udp_tracker_core.rs b/src/bootstrap/jobs/udp_tracker_core.rs index 689fa8301..dd7e8c165 100644 --- a/src/bootstrap/jobs/udp_tracker_core.rs +++ b/src/bootstrap/jobs/udp_tracker_core.rs @@ -1,14 +1,20 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use crate::container::AppContainer; -pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { +pub fn start_event_listener( + config: &Configuration, + app_container: &Arc, + cancellation_token: CancellationToken, +) -> Option> { if config.core.tracker_usage_statistics { let job = bittorrent_udp_tracker_core::statistics::event::listener::run_event_listener( app_container.udp_tracker_core_services.event_bus.receiver(), + cancellation_token, &app_container.udp_tracker_core_services.stats_repository, ); Some(job) diff --git a/src/bootstrap/jobs/udp_tracker_server.rs b/src/bootstrap/jobs/udp_tracker_server.rs index 3e8a7aaa8..fc6df9c16 100644 --- a/src/bootstrap/jobs/udp_tracker_server.rs +++ b/src/bootstrap/jobs/udp_tracker_server.rs @@ -1,14 +1,20 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use crate::container::AppContainer; -pub fn start_stats_event_listener(config: &Configuration, app_container: &Arc) -> Option> { +pub fn start_stats_event_listener( + config: &Configuration, + app_container: &Arc, + cancellation_token: CancellationToken, +) -> Option> { if config.core.tracker_usage_statistics { let job = torrust_udp_tracker_server::statistics::event::listener::run_event_listener( app_container.udp_tracker_server_container.event_bus.receiver(), + cancellation_token, &app_container.udp_tracker_server_container.stats_repository, ); Some(job) @@ -19,9 +25,10 @@ pub fn 
start_stats_event_listener(config: &Configuration, app_container: &Arc) -> JoinHandle<()> { +pub fn start_banning_event_listener(app_container: &Arc, cancellation_token: CancellationToken) -> JoinHandle<()> { torrust_udp_tracker_server::banning::event::listener::run_event_listener( app_container.udp_tracker_server_container.event_bus.receiver(), + cancellation_token, &app_container.udp_tracker_core_services.ban_service, &app_container.udp_tracker_server_container.stats_repository, ) diff --git a/src/main.rs b/src/main.rs index a49c3aeba..7012ecaa7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -10,6 +10,8 @@ async fn main() { _ = tokio::signal::ctrl_c() => { tracing::info!("Torrust tracker shutting down ..."); + jobs.cancel(); + jobs.wait_for_all(Duration::from_secs(10)).await; tracing::info!("Torrust tracker successfully shutdown."); From f7ab993e96a050ddbbd1dd8467bb5bd1ef8c411d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jun 2025 19:41:33 +0100 Subject: [PATCH 179/247] refactor: [#1589] add logs for debugging --- .../src/statistics/repository.rs | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 1851b78a8..fa85610a0 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -89,6 +89,14 @@ impl Repository { drop(stats_lock); + tracing::debug!( + "Recalculated UDP average connect processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_connections_handled: {})", + new_avg, + previous_avg, + req_processing_time, + udp_connections_handled + ); + new_avg } @@ -109,6 +117,14 @@ impl Repository { drop(stats_lock); + tracing::debug!( + "Recalculated UDP average announce processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_announces_handled: {})", + new_avg, + previous_avg, + req_processing_time, + 
udp_announces_handled + ); + new_avg } @@ -128,6 +144,14 @@ impl Repository { drop(stats_lock); + tracing::debug!( + "Recalculated UDP average scrape processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_scrapes_handled: {})", + new_avg, + previous_avg, + req_processing_time, + udp_scrapes_handled + ); + new_avg } } From 5fc255fa849ad88e977f10e45640176bfd134d26 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 18 Jun 2025 11:07:53 +0100 Subject: [PATCH 180/247] tests(udp-tracker-server): [#1589] add unit tests to statistics::repository::Repository --- cSpell.json | 1 + .../src/statistics/repository.rs | 512 ++++++++++++++++++ 2 files changed, 513 insertions(+) diff --git a/cSpell.json b/cSpell.json index fcbf53f1f..647dd24a2 100644 --- a/cSpell.json +++ b/cSpell.json @@ -34,6 +34,7 @@ "chrono", "ciphertext", "clippy", + "cloneable", "codecov", "codegen", "completei", diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index fa85610a0..eb0951614 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -155,3 +155,515 @@ impl Repository { new_avg } } + +#[cfg(test)] +mod tests { + use core::f64; + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_metrics::metric_name; + + use super::*; + use crate::statistics::*; + use crate::CurrentClock; + + #[test] + fn it_should_implement_default() { + let repo = Repository::default(); + assert!(!std::ptr::eq(&repo.stats, &Repository::new().stats)); + } + + #[test] + fn it_should_be_cloneable() { + let repo = Repository::new(); + let cloned_repo = repo.clone(); + assert!(!std::ptr::eq(&repo.stats, &cloned_repo.stats)); + } + + #[tokio::test] + async fn it_should_be_initialized_with_described_metrics() { + let repo = Repository::new(); + let stats = repo.get_stats().await; + + // Check that the described metrics 
are present + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL))); + assert!(stats + .metric_collection + .contains_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL))); + assert!(stats + .metric_collection + .contains_gauge(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS))); + } + + #[tokio::test] + async fn it_should_return_a_read_guard_to_metrics() { + let repo = Repository::new(); + let stats = repo.get_stats().await; + + // Should be able to read metrics through the guard + assert_eq!(stats.udp_requests_aborted(), 0); + assert_eq!(stats.udp_requests_banned(), 0); + } + + #[tokio::test] + async fn it_should_allow_increasing_a_counter_metric_successfully() { + let repo = Repository::new(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Increase a counter metric + let result = repo + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .await; + + assert!(result.is_ok()); + + // Verify the counter was incremented + let stats = repo.get_stats().await; + assert_eq!(stats.udp_requests_aborted(), 1); + } + + #[tokio::test] + async fn it_should_allow_increasing_a_counter_multiple_times() { + let repo = Repository::new(); + let now = 
CurrentClock::now(); + let labels = LabelSet::empty(); + + // Increase counter multiple times + for _ in 0..5 { + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .await + .unwrap(); + } + + // Verify the counter was incremented correctly + let stats = repo.get_stats().await; + assert_eq!(stats.udp_requests_aborted(), 5); + } + + #[tokio::test] + async fn it_should_allow_increasing_a_counter_with_different_labels() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + let labels_ipv4 = LabelSet::from([("server_binding_address_ip_family", "inet")]); + let labels_ipv6 = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + // Increase counters with different labels + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels_ipv4, now) + .await + .unwrap(); + + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels_ipv6, now) + .await + .unwrap(); + + // Verify both labeled metrics + let stats = repo.get_stats().await; + assert_eq!(stats.udp4_requests(), 1); + assert_eq!(stats.udp6_requests(), 1); + } + + #[tokio::test] + async fn it_should_set_a_gauge_metric_successfully() { + let repo = Repository::new(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Set a gauge metric + let result = repo + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 42.0, now) + .await; + + assert!(result.is_ok()); + + // Verify the gauge was set + let stats = repo.get_stats().await; + assert_eq!(stats.udp_banned_ips_total(), 42); + } + + #[tokio::test] + async fn it_should_overwrite_previous_value_when_setting_a_gauge_with_a_previous_value() { + let repo = Repository::new(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Set gauge to initial value + repo.set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 10.0, now) + .await + .unwrap(); + + // Overwrite 
with new value + repo.set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 25.0, now) + .await + .unwrap(); + + // Verify the gauge has the new value + let stats = repo.get_stats().await; + assert_eq!(stats.udp_banned_ips_total(), 25); + } + + #[tokio::test] + async fn it_should_allow_setting_a_gauge_with_different_labels() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + let labels_connect = LabelSet::from([("request_kind", "connect")]); + let labels_announce = LabelSet::from([("request_kind", "announce")]); + + // Set gauges with different labels + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels_connect, + 1000.0, + now, + ) + .await + .unwrap(); + + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels_announce, + 2000.0, + now, + ) + .await + .unwrap(); + + // Verify both labeled metrics + let stats = repo.get_stats().await; + assert_eq!(stats.udp_avg_connect_processing_time_ns(), 1000); + assert_eq!(stats.udp_avg_announce_processing_time_ns(), 2000); + } + + #[tokio::test] + async fn it_should_recalculate_the_udp_average_connect_processing_time_in_nanoseconds_using_moving_average() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Set up initial connections handled + let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + let ipv6_labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")]); + + // Simulate 2 IPv4 and 1 IPv6 connections + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) + .await + .unwrap(); + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) + .await + .unwrap(); + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv6_labels, now) + .await + .unwrap(); + + // 
Set initial average to 1000ns + let connect_labels = LabelSet::from([("request_kind", "connect")]); + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &connect_labels, + 1000.0, + now, + ) + .await + .unwrap(); + + // Calculate new average with processing time of 2000ns + let processing_time = Duration::from_nanos(2000); + let new_avg = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time).await; + + // Moving average: previous_avg + (new_value - previous_avg) / total_connections + // 1000 + (2000 - 1000) / 3 = 1000 + 333.33 = 1333.33 + let expected_avg = 1000.0 + (2000.0 - 1000.0) / 3.0; + assert!( + (new_avg - expected_avg).abs() < 0.01, + "Expected {expected_avg}, got {new_avg}" + ); + } + + #[tokio::test] + async fn it_should_recalculate_the_udp_average_announce_processing_time_in_nanoseconds_using_moving_average() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Set up initial announces handled + let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "announce")]); + let ipv6_labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")]); + + // Simulate 3 IPv4 and 2 IPv6 announces + for _ in 0..3 { + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) + .await + .unwrap(); + } + for _ in 0..2 { + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv6_labels, now) + .await + .unwrap(); + } + + // Set initial average to 500ns + let announce_labels = LabelSet::from([("request_kind", "announce")]); + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &announce_labels, + 500.0, + now, + ) + .await + .unwrap(); + + // Calculate new average with processing time of 1500ns + let processing_time = Duration::from_nanos(1500); + let new_avg = 
repo.recalculate_udp_avg_announce_processing_time_ns(processing_time).await; + + // Moving average: previous_avg + (new_value - previous_avg) / total_announces + // 500 + (1500 - 500) / 5 = 500 + 200 = 700 + let expected_avg = 500.0 + (1500.0 - 500.0) / 5.0; + assert!( + (new_avg - expected_avg).abs() < 0.01, + "Expected {expected_avg}, got {new_avg}" + ); + } + + #[tokio::test] + async fn it_should_recalculate_the_udp_average_scrape_processing_time_in_nanoseconds_using_moving_average() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Set up initial scrapes handled + let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")]); + + // Simulate 4 IPv4 scrapes + for _ in 0..4 { + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) + .await + .unwrap(); + } + + // Set initial average to 800ns + let scrape_labels = LabelSet::from([("request_kind", "scrape")]); + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &scrape_labels, + 800.0, + now, + ) + .await + .unwrap(); + + // Calculate new average with processing time of 1200ns + let processing_time = Duration::from_nanos(1200); + let new_avg = repo.recalculate_udp_avg_scrape_processing_time_ns(processing_time).await; + + // Moving average: previous_avg + (new_value - previous_avg) / total_scrapes + // 800 + (1200 - 800) / 4 = 800 + 100 = 900 + let expected_avg = 800.0 + (1200.0 - 800.0) / 4.0; + assert!( + (new_avg - expected_avg).abs() < 0.01, + "Expected {expected_avg}, got {new_avg}" + ); + } + + #[tokio::test] + async fn recalculate_average_methods_should_handle_zero_connections_gracefully() { + let repo = Repository::new(); + + // Test with zero connections (should not panic, should handle division by zero) + let processing_time = Duration::from_nanos(1000); + + let connect_avg = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time).await; + 
let announce_avg = repo.recalculate_udp_avg_announce_processing_time_ns(processing_time).await; + let scrape_avg = repo.recalculate_udp_avg_scrape_processing_time_ns(processing_time).await; + + // With 0 total connections, the formula becomes 0 + (1000 - 0) / 0 + // This should handle the division by zero case gracefully + assert!(connect_avg.is_infinite() || connect_avg.is_nan()); + assert!(announce_avg.is_infinite() || announce_avg.is_nan()); + assert!(scrape_avg.is_infinite() || scrape_avg.is_nan()); + } + + #[tokio::test] + async fn it_should_handle_concurrent_access() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Spawn multiple concurrent tasks + let mut handles = vec![]; + + for i in 0..10 { + let repo_clone = repo.clone(); + let handle = tokio::spawn(async move { + for _ in 0..5 { + repo_clone + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), + &LabelSet::empty(), + now, + ) + .await + .unwrap(); + } + i + }); + handles.push(handle); + } + + // Wait for all tasks to complete + for handle in handles { + handle.await.unwrap(); + } + + // Verify all increments were properly recorded + let stats = repo.get_stats().await; + assert_eq!(stats.udp_requests_aborted(), 50); // 10 tasks * 5 increments each + } + + #[tokio::test] + async fn it_should_handle_large_processing_times() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Set up a connection + let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) + .await + .unwrap(); + + // Test with very large processing time + let large_duration = Duration::from_secs(1); // 1 second = 1,000,000,000 ns + let new_avg = repo.recalculate_udp_avg_connect_processing_time_ns(large_duration).await; + + // Should handle large numbers without overflow + assert!(new_avg > 0.0); + 
assert!(new_avg.is_finite()); + } + + #[tokio::test] + async fn it_should_maintain_consistency_across_operations() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Perform a series of operations + repo.increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), + &LabelSet::empty(), + now, + ) + .await + .unwrap(); + + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), + &LabelSet::empty(), + 10.0, + now, + ) + .await + .unwrap(); + + repo.increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), + &LabelSet::empty(), + now, + ) + .await + .unwrap(); + + // Check final state + let stats = repo.get_stats().await; + assert_eq!(stats.udp_requests_aborted(), 1); + assert_eq!(stats.udp_banned_ips_total(), 10); + assert_eq!(stats.udp_requests_banned(), 1); + } + + #[tokio::test] + async fn it_should_handle_error_cases_gracefully() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Test with invalid metric name (this should still work as metrics are created dynamically) + let result = repo + .increase_counter(&metric_name!("non_existent_metric"), &LabelSet::empty(), now) + .await; + + // Should succeed as metrics are created on demand + assert!(result.is_ok()); + + // Test with NaN value for gauge + let result = repo + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), + &LabelSet::empty(), + f64::NAN, + now, + ) + .await; + + // Should handle NaN values + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_handle_moving_average_calculation_before_any_connections_are_recorded() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // This test checks the behavior of `recalculate_udp_avg_connect_processing_time_ns`` + // when no connections have been recorded yet. The first call should + // handle division by zero gracefully and return an infinite average, + // which is the current behavior. 
+ + // todo: the first average should be 2000ns, not infinity. + // This is because the first connection is not counted in the average + // calculation if the counter is increased after calculating the average. + // The problem is that we count requests when they are accepted, not + // when they are processed. And we calculate the average when the + // response is sent. + + // First calculation: no connections recorded yet, should result in infinity + let processing_time_1 = Duration::from_nanos(2000); + let avg_1 = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time_1).await; + + // Division by zero: 1000 + (2000 - 1000) / 0 = infinity + assert!( + avg_1.is_infinite(), + "First calculation should be infinite due to division by zero" + ); + + // Now add one connection and try again + let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) + .await + .unwrap(); + + // Second calculation: 1 connection, but previous average is infinity + let processing_time_2 = Duration::from_nanos(3000); + let avg_2 = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time_2).await; + + assert!( + (avg_2 - 3000.0).abs() < f64::EPSILON, + "Second calculation should be 3000ns, but got {avg_2}" + ); + } +} From 7e9d9827f1933d2774cce03eb59b47632214a8d2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 18 Jun 2025 12:15:01 +0100 Subject: [PATCH 181/247] fix(udt-tracker-server): metric description --- packages/udp-tracker-server/src/statistics/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index b42a73f27..768722ba3 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -73,9 +73,7 @@ pub fn describe_metrics() 
-> Metrics { metrics.metric_collection.describe_gauge( &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), Some(Unit::Nanoseconds), - Some(MetricDescription::new( - "Average time to process a UDP connect request in nanoseconds", - )), + Some(MetricDescription::new("Average time to process a UDP request in nanoseconds")), ); metrics From bf9d16a83ec48d2b60074fdc97b93f7c58bb5944 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 18 Jun 2025 12:33:52 +0100 Subject: [PATCH 182/247] tests(udp-tracker-server): [#1589] add unit tests to statistics::metrics::Metrics --- cSpell.json | 1 + .../src/statistics/metrics.rs | 781 ++++++++++++++++++ 2 files changed, 782 insertions(+) diff --git a/cSpell.json b/cSpell.json index 647dd24a2..76939c199 100644 --- a/cSpell.json +++ b/cSpell.json @@ -175,6 +175,7 @@ "trackerid", "Trackon", "typenum", + "udpv", "Unamed", "underflows", "Unsendable", diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index c50966bc6..3c162ff02 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -276,3 +276,784 @@ impl Metrics { .unwrap_or_default() as u64 } } + +#[cfg(test)] +mod tests { + use torrust_tracker_clock::clock::Time; + use torrust_tracker_metrics::metric_name; + + use super::*; + use crate::statistics::{ + UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, + UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, + }; + use crate::CurrentClock; + + #[test] + fn it_should_implement_default() { + let metrics = Metrics::default(); + // MetricCollection starts with empty collections + assert_eq!(metrics, Metrics::default()); + } + + #[test] + fn 
it_should_implement_debug() { + let metrics = Metrics::default(); + let debug_string = format!("{metrics:?}"); + assert!(debug_string.contains("Metrics")); + assert!(debug_string.contains("metric_collection")); + } + + #[test] + fn it_should_implement_partial_eq() { + let metrics1 = Metrics::default(); + let metrics2 = Metrics::default(); + assert_eq!(metrics1, metrics2); + } + + #[test] + fn it_should_increase_counter_metric() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + let result = metrics.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now); + + assert!(result.is_ok()); + } + + #[test] + fn it_should_increase_counter_metric_with_labels() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet")]); + + let result = metrics.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels, now); + + assert!(result.is_ok()); + } + + #[test] + fn it_should_set_gauge_metric() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + let result = metrics.set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 42.0, now); + + assert!(result.is_ok()); + } + + #[test] + fn it_should_set_gauge_metric_with_labels() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "connect")]); + + let result = metrics.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels, + 1000.0, + now, + ); + + assert!(result.is_ok()); + } + + mod udp_general_metrics { + use super::*; + + #[test] + fn it_should_return_zero_for_udp_requests_aborted_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_requests_aborted(), 0); + } + + #[test] + fn 
it_should_return_sum_of_udp_requests_aborted() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .unwrap(); + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .unwrap(); + + assert_eq!(metrics.udp_requests_aborted(), 2); + } + + #[test] + fn it_should_return_zero_for_udp_requests_banned_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_requests_banned(), 0); + } + + #[test] + fn it_should_return_sum_of_udp_requests_banned() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp_requests_banned(), 3); + } + + #[test] + fn it_should_return_zero_for_udp_banned_ips_total_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_banned_ips_total(), 0); + } + + #[test] + fn it_should_return_gauge_value_for_udp_banned_ips_total() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 10.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 10); + } + } + + mod udp_performance_metrics { + use super::*; + + #[test] + fn it_should_return_zero_for_udp_avg_connect_processing_time_ns_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_connect_processing_time_ns(), 0); + } + + #[test] + fn it_should_return_gauge_value_for_udp_avg_connect_processing_time_ns() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "connect")]); + + metrics + .set_gauge( + 
&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels, + 1500.0, + now, + ) + .unwrap(); + + assert_eq!(metrics.udp_avg_connect_processing_time_ns(), 1500); + } + + #[test] + fn it_should_return_zero_for_udp_avg_announce_processing_time_ns_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_announce_processing_time_ns(), 0); + } + + #[test] + fn it_should_return_gauge_value_for_udp_avg_announce_processing_time_ns() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "announce")]); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels, + 2500.0, + now, + ) + .unwrap(); + + assert_eq!(metrics.udp_avg_announce_processing_time_ns(), 2500); + } + + #[test] + fn it_should_return_zero_for_udp_avg_scrape_processing_time_ns_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_scrape_processing_time_ns(), 0); + } + + #[test] + fn it_should_return_gauge_value_for_udp_avg_scrape_processing_time_ns() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "scrape")]); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels, + 3500.0, + now, + ) + .unwrap(); + + assert_eq!(metrics.udp_avg_scrape_processing_time_ns(), 3500); + } + } + + mod udpv4_metrics { + use super::*; + + #[test] + fn it_should_return_zero_for_udp4_requests_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_requests(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_requests() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet")]); + + for _ in 0..5 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels, now) + 
.unwrap(); + } + + assert_eq!(metrics.udp4_requests(), 5); + } + + #[test] + fn it_should_return_zero_for_udp4_connections_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_connections_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_connections_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_connections_handled(), 3); + } + + #[test] + fn it_should_return_zero_for_udp4_announces_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_announces_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_announces_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "announce")]); + + for _ in 0..7 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_announces_handled(), 7); + } + + #[test] + fn it_should_return_zero_for_udp4_scrapes_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_scrapes_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_scrapes_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")]); + + for _ in 0..4 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_scrapes_handled(), 4); + } + + #[test] + fn it_should_return_zero_for_udp4_responses_when_no_data() { + 
let metrics = Metrics::default(); + assert_eq!(metrics.udp4_responses(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_responses() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet")]); + + for _ in 0..6 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_responses(), 6); + } + + #[test] + fn it_should_return_zero_for_udp4_errors_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_errors_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_errors_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet")]); + + for _ in 0..2 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_errors_handled(), 2); + } + } + + mod udpv6_metrics { + use super::*; + + #[test] + fn it_should_return_zero_for_udp6_requests_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_requests(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_requests() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + for _ in 0..8 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_requests(), 8); + } + + #[test] + fn it_should_return_zero_for_udp6_connections_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_connections_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_connections_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = 
LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")]); + + for _ in 0..4 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_connections_handled(), 4); + } + + #[test] + fn it_should_return_zero_for_udp6_announces_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_announces_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_announces_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")]); + + for _ in 0..9 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_announces_handled(), 9); + } + + #[test] + fn it_should_return_zero_for_udp6_scrapes_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_scrapes_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_scrapes_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")]); + + for _ in 0..6 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_scrapes_handled(), 6); + } + + #[test] + fn it_should_return_zero_for_udp6_responses_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_responses(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_responses() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + for _ in 0..11 { + metrics + 
.increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_responses(), 11); + } + + #[test] + fn it_should_return_zero_for_udp6_errors_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_errors_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_errors_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_errors_handled(), 3); + } + } + + mod combined_metrics { + use super::*; + + #[test] + fn it_should_distinguish_between_ipv4_and_ipv6_metrics() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet")]); + let ipv6_labels = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + // Add different counts for IPv4 and IPv6 + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &ipv4_labels, now) + .unwrap(); + } + + for _ in 0..7 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &ipv6_labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_requests(), 3); + assert_eq!(metrics.udp6_requests(), 7); + } + + #[test] + fn it_should_distinguish_between_different_request_kinds() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + let connect_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + let announce_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "announce")]); + let scrape_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", 
"scrape")]); + + // Add different counts for different request kinds + for _ in 0..2 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &connect_labels, + now, + ) + .unwrap(); + } + + for _ in 0..5 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &announce_labels, + now, + ) + .unwrap(); + } + + for _ in 0..1 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &scrape_labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_connections_handled(), 2); + assert_eq!(metrics.udp4_announces_handled(), 5); + assert_eq!(metrics.udp4_scrapes_handled(), 1); + } + + #[test] + fn it_should_handle_mixed_ipv4_and_ipv6_for_different_request_kinds() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + let ipv4_connect_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + let ipv6_connect_labels = + LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")]); + let ipv4_announce_labels = + LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "announce")]); + let ipv6_announce_labels = + LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")]); + + // Add mixed IPv4/IPv6 counts + for _ in 0..3 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &ipv4_connect_labels, + now, + ) + .unwrap(); + } + + for _ in 0..2 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &ipv6_connect_labels, + now, + ) + .unwrap(); + } + + for _ in 0..4 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &ipv4_announce_labels, + now, + ) + .unwrap(); + } + + for _ in 0..6 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + 
&ipv6_announce_labels, + now, + ) + .unwrap(); + } + + assert_eq!(metrics.udp4_connections_handled(), 3); + assert_eq!(metrics.udp6_connections_handled(), 2); + assert_eq!(metrics.udp4_announces_handled(), 4); + assert_eq!(metrics.udp6_announces_handled(), 6); + } + } + + mod edge_cases { + use super::*; + + #[test] + fn it_should_handle_large_counter_values() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Add a large number of increments + for _ in 0..1000 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp_requests_aborted(), 1000); + } + + #[test] + fn it_should_handle_large_gauge_values() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Set a large gauge value + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 999_999.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 999_999); + } + + #[test] + fn it_should_handle_zero_gauge_values() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 0.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 0); + } + + #[test] + fn it_should_handle_fractional_gauge_values_with_truncation() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "connect")]); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels, + 1234.567, + now, + ) + .unwrap(); + + // Should truncate to 1234 + assert_eq!(metrics.udp_avg_connect_processing_time_ns(), 1234); + } + + #[test] + fn it_should_overwrite_gauge_values_when_set_multiple_times() { + let mut metrics = Metrics::default(); + let now = 
CurrentClock::now(); + let labels = LabelSet::empty(); + + // Set initial value + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 50.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 50); + + // Overwrite with new value + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 75.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 75); + } + + #[test] + fn it_should_handle_empty_label_sets() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let empty_labels = LabelSet::empty(); + + let result = metrics.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &empty_labels, now); + + assert!(result.is_ok()); + assert_eq!(metrics.udp_requests_aborted(), 1); + } + + #[test] + fn it_should_handle_multiple_labels_on_same_metric() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + let labels1 = LabelSet::from([("server_binding_address_ip_family", "inet")]); + let labels2 = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + // Add to same metric with different labels + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels1, now) + .unwrap(); + } + + for _ in 0..5 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels2, now) + .unwrap(); + } + + // Should return labeled sums correctly + assert_eq!(metrics.udp4_requests(), 3); + assert_eq!(metrics.udp6_requests(), 5); + } + } + + mod error_handling { + use super::*; + + #[test] + fn it_should_return_ok_result_for_valid_counter_operations() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + let result = metrics.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now); + + assert!(result.is_ok()); + } + + #[test] + fn 
it_should_return_ok_result_for_valid_gauge_operations() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + let result = metrics.set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 42.0, now); + + assert!(result.is_ok()); + } + + #[test] + fn it_should_handle_unknown_metric_names_gracefully() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // This should still work as metrics are created on demand + let result = metrics.increase_counter(&metric_name!("unknown_metric"), &labels, now); + + assert!(result.is_ok()); + } + } +} From 520fd8b6deb9d7ec5cc943ca622267565af304dd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 18 Jun 2025 20:04:50 +0100 Subject: [PATCH 183/247] chore: [#1589] add debug logs for avg processing time metric update --- .../src/statistics/event/handler/response_sent.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index 7e05e483b..e76d67a4e 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -19,6 +19,9 @@ pub async fn handle_event( let new_avg = stats_repository .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) .await; + + tracing::debug!("Updating average processing time metric for connect requests: {} ns", new_avg); + let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); match stats_repository @@ -39,6 +42,12 @@ pub async fn handle_event( let new_avg = stats_repository .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) .await; + + tracing::debug!( + "Updating average processing time metric for 
announce requests: {} ns", + new_avg + ); + let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); match stats_repository @@ -59,6 +68,9 @@ pub async fn handle_event( let new_avg = stats_repository .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) .await; + + tracing::debug!("Updating average processing time metric for scrape requests: {} ns", new_avg); + let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); match stats_repository From e6c05b6886e241dbf6f2472d41b2c0cc47739756 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Jun 2025 10:30:54 +0100 Subject: [PATCH 184/247] refactor(udp-tracker-server): [#1589] move average processing time calculation from Repository to Metrics --- .../src/statistics/metrics.rs | 67 +++++++++++++++++++ .../src/statistics/repository.rs | 58 +--------------- 2 files changed, 70 insertions(+), 55 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index 3c162ff02..e0ca0aaaf 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; @@ -48,6 +50,71 @@ impl Metrics { } impl Metrics { + #[allow(clippy::cast_precision_loss)] + pub fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) -> f64 { + let req_processing_time = req_processing_time.as_nanos() as f64; + let udp_connections_handled = (self.udp4_connections_handled() + self.udp6_connections_handled()) as f64; + + let previous_avg = self.udp_avg_connect_processing_time_ns(); + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let 
new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled; + + tracing::debug!( + "Recalculated UDP average connect processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_connections_handled: {})", + new_avg, + previous_avg, + req_processing_time, + udp_connections_handled + ); + + new_avg + } + + #[allow(clippy::cast_precision_loss)] + pub fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) -> f64 { + let req_processing_time = req_processing_time.as_nanos() as f64; + + let udp_announces_handled = (self.udp4_announces_handled() + self.udp6_announces_handled()) as f64; + + let previous_avg = self.udp_avg_announce_processing_time_ns(); + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled; + + tracing::debug!( + "Recalculated UDP average announce processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_announces_handled: {})", + new_avg, + previous_avg, + req_processing_time, + udp_announces_handled + ); + + new_avg + } + + #[allow(clippy::cast_precision_loss)] + pub fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) -> f64 { + let req_processing_time = req_processing_time.as_nanos() as f64; + + let udp_scrapes_handled = (self.udp4_scrapes_handled() + self.udp6_scrapes_handled()) as f64; + + let previous_avg = self.udp_avg_scrape_processing_time_ns(); + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled; + + tracing::debug!( + "Recalculated UDP average scrape processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_scrapes_handled: {})", + new_avg, + previous_avg, + req_processing_time, + udp_scrapes_handled + ); + + new_avg + } + // UDP /// Total number of UDP (UDP 
tracker) requests aborted. #[must_use] diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index eb0951614..2d081767e 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -73,85 +73,33 @@ impl Repository { result } - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] pub async fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) -> f64 { let stats_lock = self.stats.write().await; - let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_connections_handled = (stats_lock.udp4_connections_handled() + stats_lock.udp6_connections_handled()) as f64; - - let previous_avg = stats_lock.udp_avg_connect_processing_time_ns(); - - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled; + let new_avg = stats_lock.recalculate_udp_avg_connect_processing_time_ns(req_processing_time); drop(stats_lock); - tracing::debug!( - "Recalculated UDP average connect processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_connections_handled: {})", - new_avg, - previous_avg, - req_processing_time, - udp_connections_handled - ); - new_avg } - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] pub async fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) -> f64 { let stats_lock = self.stats.write().await; - let req_processing_time = req_processing_time.as_nanos() as f64; - - let udp_announces_handled = (stats_lock.udp4_announces_handled() + stats_lock.udp6_announces_handled()) as f64; - - let previous_avg = stats_lock.udp_avg_announce_processing_time_ns(); - - // Moving average: 
https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled; + let new_avg = stats_lock.recalculate_udp_avg_announce_processing_time_ns(req_processing_time); drop(stats_lock); - tracing::debug!( - "Recalculated UDP average announce processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_announces_handled: {})", - new_avg, - previous_avg, - req_processing_time, - udp_announces_handled - ); - new_avg } - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] pub async fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) -> f64 { let stats_lock = self.stats.write().await; - let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_scrapes_handled = (stats_lock.udp4_scrapes_handled() + stats_lock.udp6_scrapes_handled()) as f64; - - let previous_avg = stats_lock.udp_avg_scrape_processing_time_ns(); - - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled; + let new_avg = stats_lock.recalculate_udp_avg_scrape_processing_time_ns(req_processing_time); drop(stats_lock); - tracing::debug!( - "Recalculated UDP average scrape processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_scrapes_handled: {})", - new_avg, - previous_avg, - req_processing_time, - udp_scrapes_handled - ); - new_avg } } From d50948ea1a5a311605adba930d464c3334835df1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Jun 2025 11:19:57 +0100 Subject: [PATCH 185/247] refactor: [#1598] make recalculate udp avg connect processing time metric and update atomic It also fixes a division by zero bug when the metrics is updated before the counter for number of conenctions has been increased. It only avoid the division by zero. 
I will propoerly fixed with independent request counter for the moving average calculation. --- .../statistics/event/handler/response_sent.rs | 23 ++------ .../src/statistics/metrics.rs | 31 +++++++++-- .../src/statistics/repository.rs | 53 ++++++++++++++----- 3 files changed, 73 insertions(+), 34 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index e76d67a4e..7b271f872 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -16,26 +16,13 @@ pub async fn handle_event( let (result_label_value, kind_label_value) = match kind { UdpResponseKind::Ok { req_kind } => match req_kind { UdpRequestKind::Connect => { - let new_avg = stats_repository - .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) - .await; - - tracing::debug!("Updating average processing time metric for connect requests: {} ns", new_avg); - let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } + + let _new_avg = stats_repository + .recalculate_udp_avg_connect_processing_time_ns(req_processing_time, &label_set, now) + .await; + (LabelValue::new("ok"), UdpRequestKind::Connect.into()) } UdpRequestKind::Announce { announce_request } => { diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index e0ca0aaaf..61902dbba 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -51,14 +51,23 @@ 
impl Metrics { impl Metrics { #[allow(clippy::cast_precision_loss)] - pub fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) -> f64 { + pub fn recalculate_udp_avg_connect_processing_time_ns( + &mut self, + req_processing_time: Duration, + label_set: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; let udp_connections_handled = (self.udp4_connections_handled() + self.udp6_connections_handled()) as f64; let previous_avg = self.udp_avg_connect_processing_time_ns(); - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled; + let new_avg = if udp_connections_handled == 0.0 { + req_processing_time + } else { + // Moving average: https://en.wikipedia.org/wiki/Moving_average + previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled + }; tracing::debug!( "Recalculated UDP average connect processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_connections_handled: {})", @@ -68,9 +77,25 @@ impl Metrics { udp_connections_handled ); + self.update_udp_avg_connect_processing_time_ns(new_avg, label_set, now); + new_avg } + fn update_udp_avg_connect_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { + tracing::debug!("Updating average processing time metric for connect requests: {} ns", new_avg); + + match self.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + label_set, + new_avg, + now, + ) { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + } + #[allow(clippy::cast_precision_loss)] pub fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; diff --git 
a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 2d081767e..cb6979a83 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -73,10 +73,15 @@ impl Repository { result } - pub async fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) -> f64 { - let stats_lock = self.stats.write().await; + pub async fn recalculate_udp_avg_connect_processing_time_ns( + &self, + req_processing_time: Duration, + label_set: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> f64 { + let mut stats_lock = self.stats.write().await; - let new_avg = stats_lock.recalculate_udp_avg_connect_processing_time_ns(req_processing_time); + let new_avg = stats_lock.recalculate_udp_avg_connect_processing_time_ns(req_processing_time, label_set, now); drop(stats_lock); @@ -338,7 +343,9 @@ mod tests { // Calculate new average with processing time of 2000ns let processing_time = Duration::from_nanos(2000); - let new_avg = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time).await; + let new_avg = repo + .recalculate_udp_avg_connect_processing_time_ns(processing_time, &connect_labels, now) + .await; // Moving average: previous_avg + (new_value - previous_avg) / total_connections // 1000 + (2000 - 1000) / 3 = 1000 + 333.33 = 1333.33 @@ -436,17 +443,25 @@ mod tests { #[tokio::test] async fn recalculate_average_methods_should_handle_zero_connections_gracefully() { let repo = Repository::new(); + let now = CurrentClock::now(); // Test with zero connections (should not panic, should handle division by zero) let processing_time = Duration::from_nanos(1000); - let connect_avg = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time).await; + let connect_labels = LabelSet::from([("request_kind", "connect")]); + let connect_avg = repo + .recalculate_udp_avg_connect_processing_time_ns(processing_time, 
&connect_labels, now) + .await; + + let _announce_labels = LabelSet::from([("request_kind", "announce")]); let announce_avg = repo.recalculate_udp_avg_announce_processing_time_ns(processing_time).await; + + let _scrape_labels = LabelSet::from([("request_kind", "scrape")]); let scrape_avg = repo.recalculate_udp_avg_scrape_processing_time_ns(processing_time).await; // With 0 total connections, the formula becomes 0 + (1000 - 0) / 0 // This should handle the division by zero case gracefully - assert!(connect_avg.is_infinite() || connect_avg.is_nan()); + assert!((connect_avg - 1000.0).abs() < f64::EPSILON); assert!(announce_avg.is_infinite() || announce_avg.is_nan()); assert!(scrape_avg.is_infinite() || scrape_avg.is_nan()); } @@ -500,7 +515,10 @@ mod tests { // Test with very large processing time let large_duration = Duration::from_secs(1); // 1 second = 1,000,000,000 ns - let new_avg = repo.recalculate_udp_avg_connect_processing_time_ns(large_duration).await; + let connect_labels = LabelSet::from([("request_kind", "connect")]); + let new_avg = repo + .recalculate_udp_avg_connect_processing_time_ns(large_duration, &connect_labels, now) + .await; // Should handle large numbers without overflow assert!(new_avg > 0.0); @@ -575,6 +593,7 @@ mod tests { #[tokio::test] async fn it_should_handle_moving_average_calculation_before_any_connections_are_recorded() { let repo = Repository::new(); + let connect_labels = LabelSet::from([("request_kind", "connect")]); let now = CurrentClock::now(); // This test checks the behavior of `recalculate_udp_avg_connect_processing_time_ns`` @@ -591,12 +610,13 @@ mod tests { // First calculation: no connections recorded yet, should result in infinity let processing_time_1 = Duration::from_nanos(2000); - let avg_1 = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time_1).await; + let avg_1 = repo + .recalculate_udp_avg_connect_processing_time_ns(processing_time_1, &connect_labels, now) + .await; - // Division by zero: 1000 + 
(2000 - 1000) / 0 = infinity assert!( - avg_1.is_infinite(), - "First calculation should be infinite due to division by zero" + (avg_1 - 2000.0).abs() < f64::EPSILON, + "First calculation should be 2000, but got {avg_1}" ); // Now add one connection and try again @@ -605,10 +625,17 @@ mod tests { .await .unwrap(); - // Second calculation: 1 connection, but previous average is infinity + // Second calculation: 1 connection let processing_time_2 = Duration::from_nanos(3000); - let avg_2 = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time_2).await; + let connect_labels = LabelSet::from([("request_kind", "connect")]); + let avg_2 = repo + .recalculate_udp_avg_connect_processing_time_ns(processing_time_2, &connect_labels, now) + .await; + // There is one connection, so the average should be: + // 2000 + (3000 - 2000) / 1 = 2000 + 1000 = 3000 + // This is because one connection is not counted yet in the average calculation, + // so the average is simply the processing time of the second connection. assert!( (avg_2 - 3000.0).abs() < f64::EPSILON, "Second calculation should be 3000ns, but got {avg_2}" From 59fbb39974fe731d5b6bc8dc50cee29816058780 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Jun 2025 11:32:50 +0100 Subject: [PATCH 186/247] refactor: [#1598] make recalculate udp avg announce processing time metric and update atomic It also fixes a division by zero bug when the metrics is updated before the counter for number of conenctions has been increased. It only avoid the division by zero. I will propoerly fixed with independent request counter for the moving average calculation. 
--- .../statistics/event/handler/response_sent.rs | 26 ++-------- .../src/statistics/metrics.rs | 51 ++++++++++++------- .../src/statistics/repository.rs | 23 ++++++--- 3 files changed, 54 insertions(+), 46 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index 7b271f872..3258a7023 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -26,29 +26,13 @@ pub async fn handle_event( (LabelValue::new("ok"), UdpRequestKind::Connect.into()) } UdpRequestKind::Announce { announce_request } => { - let new_avg = stats_repository - .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) - .await; - - tracing::debug!( - "Updating average processing time metric for announce requests: {} ns", - new_avg - ); - let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } + + let _new_avg = stats_repository + .recalculate_udp_avg_announce_processing_time_ns(req_processing_time, &label_set, now) + .await; + (LabelValue::new("ok"), UdpRequestKind::Announce { announce_request }.into()) } UdpRequestKind::Scrape => { diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index 61902dbba..cef1c2824 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -77,35 +77,30 @@ impl Metrics { udp_connections_handled ); - self.update_udp_avg_connect_processing_time_ns(new_avg, label_set, 
now); + self.update_udp_avg_processing_time_ns(new_avg, label_set, now); new_avg } - fn update_udp_avg_connect_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { - tracing::debug!("Updating average processing time metric for connect requests: {} ns", new_avg); - - match self.set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - label_set, - new_avg, - now, - ) { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - } - #[allow(clippy::cast_precision_loss)] - pub fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) -> f64 { + pub fn recalculate_udp_avg_announce_processing_time_ns( + &mut self, + req_processing_time: Duration, + label_set: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; let udp_announces_handled = (self.udp4_announces_handled() + self.udp6_announces_handled()) as f64; let previous_avg = self.udp_avg_announce_processing_time_ns(); - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled; + let new_avg = if udp_announces_handled == 0.0 { + req_processing_time + } else { + // Moving average: https://en.wikipedia.org/wiki/Moving_average + previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled + }; tracing::debug!( "Recalculated UDP average announce processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_announces_handled: {})", @@ -115,9 +110,29 @@ impl Metrics { udp_announces_handled ); + self.update_udp_avg_processing_time_ns(new_avg, label_set, now); + new_avg } + fn update_udp_avg_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { + tracing::debug!( + "Updating average processing time metric to {} ns for label set {}", + new_avg, 
+ label_set, + ); + + match self.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + label_set, + new_avg, + now, + ) { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + } + #[allow(clippy::cast_precision_loss)] pub fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index cb6979a83..024ff4535 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -88,10 +88,15 @@ impl Repository { new_avg } - pub async fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) -> f64 { - let stats_lock = self.stats.write().await; + pub async fn recalculate_udp_avg_announce_processing_time_ns( + &self, + req_processing_time: Duration, + label_set: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> f64 { + let mut stats_lock = self.stats.write().await; - let new_avg = stats_lock.recalculate_udp_avg_announce_processing_time_ns(req_processing_time); + let new_avg = stats_lock.recalculate_udp_avg_announce_processing_time_ns(req_processing_time, label_set, now); drop(stats_lock); @@ -390,7 +395,9 @@ mod tests { // Calculate new average with processing time of 1500ns let processing_time = Duration::from_nanos(1500); - let new_avg = repo.recalculate_udp_avg_announce_processing_time_ns(processing_time).await; + let new_avg = repo + .recalculate_udp_avg_announce_processing_time_ns(processing_time, &announce_labels, now) + .await; // Moving average: previous_avg + (new_value - previous_avg) / total_announces // 500 + (1500 - 500) / 5 = 500 + 200 = 700 @@ -453,8 +460,10 @@ mod tests { .recalculate_udp_avg_connect_processing_time_ns(processing_time, &connect_labels, now) .await; - let 
_announce_labels = LabelSet::from([("request_kind", "announce")]); - let announce_avg = repo.recalculate_udp_avg_announce_processing_time_ns(processing_time).await; + let announce_labels = LabelSet::from([("request_kind", "announce")]); + let announce_avg = repo + .recalculate_udp_avg_announce_processing_time_ns(processing_time, &announce_labels, now) + .await; let _scrape_labels = LabelSet::from([("request_kind", "scrape")]); let scrape_avg = repo.recalculate_udp_avg_scrape_processing_time_ns(processing_time).await; @@ -462,7 +471,7 @@ mod tests { // With 0 total connections, the formula becomes 0 + (1000 - 0) / 0 // This should handle the division by zero case gracefully assert!((connect_avg - 1000.0).abs() < f64::EPSILON); - assert!(announce_avg.is_infinite() || announce_avg.is_nan()); + assert!((announce_avg - 1000.0).abs() < f64::EPSILON); assert!(scrape_avg.is_infinite() || scrape_avg.is_nan()); } From 47c294987725dba83363460c68222f914efcb698 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Jun 2025 12:05:58 +0100 Subject: [PATCH 187/247] refactor: [#1598] make recalculate udp avg scrape processing time metric and update atomic It also fixes a division by zero bug when the metrics is updated before the counter for number of conenctions has been increased. It only avoid the division by zero. I will propoerly fixed with independent request counter for the moving average calculation. 
--- .../statistics/event/handler/response_sent.rs | 27 +++------- .../src/statistics/metrics.rs | 53 +++++++++++-------- .../src/statistics/repository.rs | 23 +++++--- 3 files changed, 55 insertions(+), 48 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index 3258a7023..7594d16f2 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -4,7 +4,7 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::{ConnectionContext, UdpRequestKind, UdpResponseKind}; use crate::statistics::repository::Repository; -use crate::statistics::{UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL}; +use crate::statistics::UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL; pub async fn handle_event( context: ConnectionContext, @@ -36,33 +36,20 @@ pub async fn handle_event( (LabelValue::new("ok"), UdpRequestKind::Announce { announce_request }.into()) } UdpRequestKind::Scrape => { - let new_avg = stats_repository - .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) - .await; - - tracing::debug!("Updating average processing time metric for scrape requests: {} ns", new_avg); - let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } + + let _new_avg = stats_repository + .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time, &label_set, now) + .await; + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) } }, 
UdpResponseKind::Error { opt_req_kind: _ } => (LabelValue::new("error"), LabelValue::ignore()), }; - // Extendable metrics + // Increase the number of responses sent let mut label_set = LabelSet::from(context); if result_label_value == LabelValue::new("ok") { label_set.upsert(label_name!("request_kind"), kind_label_value); diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index cef1c2824..eedd1a02f 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -115,34 +115,25 @@ impl Metrics { new_avg } - fn update_udp_avg_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { - tracing::debug!( - "Updating average processing time metric to {} ns for label set {}", - new_avg, - label_set, - ); - - match self.set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - label_set, - new_avg, - now, - ) { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - } - #[allow(clippy::cast_precision_loss)] - pub fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) -> f64 { + pub fn recalculate_udp_avg_scrape_processing_time_ns( + &mut self, + req_processing_time: Duration, + label_set: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; let udp_scrapes_handled = (self.udp4_scrapes_handled() + self.udp6_scrapes_handled()) as f64; let previous_avg = self.udp_avg_scrape_processing_time_ns(); - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled; + let new_avg = if udp_scrapes_handled == 0.0 { + req_processing_time + } else { + // Moving average: https://en.wikipedia.org/wiki/Moving_average + previous_avg as f64 + 
(req_processing_time - previous_avg as f64) / udp_scrapes_handled + }; tracing::debug!( "Recalculated UDP average scrape processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_scrapes_handled: {})", @@ -152,9 +143,29 @@ impl Metrics { udp_scrapes_handled ); + self.update_udp_avg_processing_time_ns(new_avg, label_set, now); + new_avg } + fn update_udp_avg_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { + tracing::debug!( + "Updating average processing time metric to {} ns for label set {}", + new_avg, + label_set, + ); + + match self.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + label_set, + new_avg, + now, + ) { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + } + // UDP /// Total number of UDP (UDP tracker) requests aborted. #[must_use] diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 024ff4535..c9b3d0548 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -103,10 +103,15 @@ impl Repository { new_avg } - pub async fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) -> f64 { - let stats_lock = self.stats.write().await; + pub async fn recalculate_udp_avg_scrape_processing_time_ns( + &self, + req_processing_time: Duration, + label_set: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> f64 { + let mut stats_lock = self.stats.write().await; - let new_avg = stats_lock.recalculate_udp_avg_scrape_processing_time_ns(req_processing_time); + let new_avg = stats_lock.recalculate_udp_avg_scrape_processing_time_ns(req_processing_time, label_set, now); drop(stats_lock); @@ -436,7 +441,9 @@ mod tests { // Calculate new average with processing time of 1200ns let processing_time = Duration::from_nanos(1200); - let new_avg = 
repo.recalculate_udp_avg_scrape_processing_time_ns(processing_time).await; + let new_avg = repo + .recalculate_udp_avg_scrape_processing_time_ns(processing_time, &scrape_labels, now) + .await; // Moving average: previous_avg + (new_value - previous_avg) / total_scrapes // 800 + (1200 - 800) / 4 = 800 + 100 = 900 @@ -465,14 +472,16 @@ mod tests { .recalculate_udp_avg_announce_processing_time_ns(processing_time, &announce_labels, now) .await; - let _scrape_labels = LabelSet::from([("request_kind", "scrape")]); - let scrape_avg = repo.recalculate_udp_avg_scrape_processing_time_ns(processing_time).await; + let scrape_labels = LabelSet::from([("request_kind", "scrape")]); + let scrape_avg = repo + .recalculate_udp_avg_scrape_processing_time_ns(processing_time, &scrape_labels, now) + .await; // With 0 total connections, the formula becomes 0 + (1000 - 0) / 0 // This should handle the division by zero case gracefully assert!((connect_avg - 1000.0).abs() < f64::EPSILON); assert!((announce_avg - 1000.0).abs() < f64::EPSILON); - assert!(scrape_avg.is_infinite() || scrape_avg.is_nan()); + assert!((scrape_avg - 1000.0).abs() < f64::EPSILON); } #[tokio::test] From 1c13b12c7cf6c4f109cebea8e8c85ccebb1f99c6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Jun 2025 12:27:16 +0100 Subject: [PATCH 188/247] fix: [#1589] partially. Moving average calculated for each time series We can't count the total number of UDP requests while calculating the moving average but updating it only for a concrete label set (time series). Averages are calculate for each label set. They could be aggregated by caclulating the average for all time series. 
--- .../src/statistics/metrics.rs | 52 +++++++++++++------ 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index eedd1a02f..8e32c1f4c 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -58,15 +58,16 @@ impl Metrics { now: DurationSinceUnixEpoch, ) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_connections_handled = (self.udp4_connections_handled() + self.udp6_connections_handled()) as f64; - let previous_avg = self.udp_avg_connect_processing_time_ns(); + let request_accepted_total = self.udp_request_accepted(label_set) as f64; - let new_avg = if udp_connections_handled == 0.0 { + let previous_avg = self.udp_avg_processing_time_ns(label_set); + + let new_avg = if request_accepted_total == 0.0 { req_processing_time } else { // Moving average: https://en.wikipedia.org/wiki/Moving_average - previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled + previous_avg as f64 + (req_processing_time - previous_avg as f64) / request_accepted_total }; tracing::debug!( @@ -74,7 +75,7 @@ impl Metrics { new_avg, previous_avg, req_processing_time, - udp_connections_handled + request_accepted_total ); self.update_udp_avg_processing_time_ns(new_avg, label_set, now); @@ -91,15 +92,15 @@ impl Metrics { ) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_announces_handled = (self.udp4_announces_handled() + self.udp6_announces_handled()) as f64; + let request_accepted_total = self.udp_request_accepted(label_set) as f64; - let previous_avg = self.udp_avg_announce_processing_time_ns(); + let previous_avg = self.udp_avg_processing_time_ns(label_set); - let new_avg = if udp_announces_handled == 0.0 { + let new_avg = if request_accepted_total == 0.0 { req_processing_time } else { // Moving 
average: https://en.wikipedia.org/wiki/Moving_average - previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled + previous_avg as f64 + (req_processing_time - previous_avg as f64) / request_accepted_total }; tracing::debug!( @@ -107,7 +108,7 @@ impl Metrics { new_avg, previous_avg, req_processing_time, - udp_announces_handled + request_accepted_total ); self.update_udp_avg_processing_time_ns(new_avg, label_set, now); @@ -124,15 +125,15 @@ impl Metrics { ) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_scrapes_handled = (self.udp4_scrapes_handled() + self.udp6_scrapes_handled()) as f64; + let request_accepted_total = self.udp_request_accepted(label_set) as f64; - let previous_avg = self.udp_avg_scrape_processing_time_ns(); + let previous_avg = self.udp_avg_processing_time_ns(label_set); - let new_avg = if udp_scrapes_handled == 0.0 { + let new_avg = if request_accepted_total == 0.0 { req_processing_time } else { // Moving average: https://en.wikipedia.org/wiki/Moving_average - previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled + previous_avg as f64 + (req_processing_time - previous_avg as f64) / request_accepted_total }; tracing::debug!( @@ -140,7 +141,7 @@ impl Metrics { new_avg, previous_avg, req_processing_time, - udp_scrapes_handled + request_accepted_total ); self.update_udp_avg_processing_time_ns(new_avg, label_set, now); @@ -148,6 +149,27 @@ impl Metrics { new_avg } + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_processing_time_ns(&self, label_set: &LabelSet) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + label_set, + ) + .unwrap_or_default() as u64 + } + + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_request_accepted(&self, label_set: &LabelSet) -> u64 { + 
self.metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), label_set) + .unwrap_or_default() as u64 + } + fn update_udp_avg_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { tracing::debug!( "Updating average processing time metric to {} ns for label set {}", From 164de924999367b6fb714c2ecea38da7ad99b0fb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Jun 2025 13:59:55 +0100 Subject: [PATCH 189/247] refactor: [#1589] remove duplicate code --- .../statistics/event/handler/response_sent.rs | 6 +- .../src/statistics/metrics.rs | 71 +------------------ .../src/statistics/repository.rs | 52 +++----------- 3 files changed, 17 insertions(+), 112 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index 7594d16f2..34093f511 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -20,7 +20,7 @@ pub async fn handle_event( label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); let _new_avg = stats_repository - .recalculate_udp_avg_connect_processing_time_ns(req_processing_time, &label_set, now) + .recalculate_udp_avg_processing_time_ns(req_processing_time, &label_set, now) .await; (LabelValue::new("ok"), UdpRequestKind::Connect.into()) @@ -30,7 +30,7 @@ pub async fn handle_event( label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); let _new_avg = stats_repository - .recalculate_udp_avg_announce_processing_time_ns(req_processing_time, &label_set, now) + .recalculate_udp_avg_processing_time_ns(req_processing_time, &label_set, now) .await; (LabelValue::new("ok"), UdpRequestKind::Announce { announce_request }.into()) @@ -40,7 +40,7 @@ pub async fn handle_event( 
label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); let _new_avg = stats_repository - .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time, &label_set, now) + .recalculate_udp_avg_processing_time_ns(req_processing_time, &label_set, now) .await; (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index 8e32c1f4c..bfed16c47 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -51,7 +51,7 @@ impl Metrics { impl Metrics { #[allow(clippy::cast_precision_loss)] - pub fn recalculate_udp_avg_connect_processing_time_ns( + pub fn recalculate_udp_avg_processing_time_ns( &mut self, req_processing_time: Duration, label_set: &LabelSet, @@ -71,73 +71,8 @@ impl Metrics { }; tracing::debug!( - "Recalculated UDP average connect processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_connections_handled: {})", - new_avg, - previous_avg, - req_processing_time, - request_accepted_total - ); - - self.update_udp_avg_processing_time_ns(new_avg, label_set, now); - - new_avg - } - - #[allow(clippy::cast_precision_loss)] - pub fn recalculate_udp_avg_announce_processing_time_ns( - &mut self, - req_processing_time: Duration, - label_set: &LabelSet, - now: DurationSinceUnixEpoch, - ) -> f64 { - let req_processing_time = req_processing_time.as_nanos() as f64; - - let request_accepted_total = self.udp_request_accepted(label_set) as f64; - - let previous_avg = self.udp_avg_processing_time_ns(label_set); - - let new_avg = if request_accepted_total == 0.0 { - req_processing_time - } else { - // Moving average: https://en.wikipedia.org/wiki/Moving_average - previous_avg as f64 + (req_processing_time - previous_avg as f64) / request_accepted_total - }; - - tracing::debug!( - "Recalculated UDP average announce 
processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_announces_handled: {})", - new_avg, - previous_avg, - req_processing_time, - request_accepted_total - ); - - self.update_udp_avg_processing_time_ns(new_avg, label_set, now); - - new_avg - } - - #[allow(clippy::cast_precision_loss)] - pub fn recalculate_udp_avg_scrape_processing_time_ns( - &mut self, - req_processing_time: Duration, - label_set: &LabelSet, - now: DurationSinceUnixEpoch, - ) -> f64 { - let req_processing_time = req_processing_time.as_nanos() as f64; - - let request_accepted_total = self.udp_request_accepted(label_set) as f64; - - let previous_avg = self.udp_avg_processing_time_ns(label_set); - - let new_avg = if request_accepted_total == 0.0 { - req_processing_time - } else { - // Moving average: https://en.wikipedia.org/wiki/Moving_average - previous_avg as f64 + (req_processing_time - previous_avg as f64) / request_accepted_total - }; - - tracing::debug!( - "Recalculated UDP average scrape processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_scrapes_handled: {})", + "Recalculated UDP average processing time for labels {}: {} ns (previous: {} ns, req_processing_time: {} ns, request_accepted_total: {})", + label_set, new_avg, previous_avg, req_processing_time, diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index c9b3d0548..6695bbfbc 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -73,7 +73,7 @@ impl Repository { result } - pub async fn recalculate_udp_avg_connect_processing_time_ns( + pub async fn recalculate_udp_avg_processing_time_ns( &self, req_processing_time: Duration, label_set: &LabelSet, @@ -81,37 +81,7 @@ impl Repository { ) -> f64 { let mut stats_lock = self.stats.write().await; - let new_avg = stats_lock.recalculate_udp_avg_connect_processing_time_ns(req_processing_time, label_set, 
now); - - drop(stats_lock); - - new_avg - } - - pub async fn recalculate_udp_avg_announce_processing_time_ns( - &self, - req_processing_time: Duration, - label_set: &LabelSet, - now: DurationSinceUnixEpoch, - ) -> f64 { - let mut stats_lock = self.stats.write().await; - - let new_avg = stats_lock.recalculate_udp_avg_announce_processing_time_ns(req_processing_time, label_set, now); - - drop(stats_lock); - - new_avg - } - - pub async fn recalculate_udp_avg_scrape_processing_time_ns( - &self, - req_processing_time: Duration, - label_set: &LabelSet, - now: DurationSinceUnixEpoch, - ) -> f64 { - let mut stats_lock = self.stats.write().await; - - let new_avg = stats_lock.recalculate_udp_avg_scrape_processing_time_ns(req_processing_time, label_set, now); + let new_avg = stats_lock.recalculate_udp_avg_processing_time_ns(req_processing_time, label_set, now); drop(stats_lock); @@ -354,7 +324,7 @@ mod tests { // Calculate new average with processing time of 2000ns let processing_time = Duration::from_nanos(2000); let new_avg = repo - .recalculate_udp_avg_connect_processing_time_ns(processing_time, &connect_labels, now) + .recalculate_udp_avg_processing_time_ns(processing_time, &connect_labels, now) .await; // Moving average: previous_avg + (new_value - previous_avg) / total_connections @@ -401,7 +371,7 @@ mod tests { // Calculate new average with processing time of 1500ns let processing_time = Duration::from_nanos(1500); let new_avg = repo - .recalculate_udp_avg_announce_processing_time_ns(processing_time, &announce_labels, now) + .recalculate_udp_avg_processing_time_ns(processing_time, &announce_labels, now) .await; // Moving average: previous_avg + (new_value - previous_avg) / total_announces @@ -442,7 +412,7 @@ mod tests { // Calculate new average with processing time of 1200ns let processing_time = Duration::from_nanos(1200); let new_avg = repo - .recalculate_udp_avg_scrape_processing_time_ns(processing_time, &scrape_labels, now) + 
.recalculate_udp_avg_processing_time_ns(processing_time, &scrape_labels, now) .await; // Moving average: previous_avg + (new_value - previous_avg) / total_scrapes @@ -464,17 +434,17 @@ mod tests { let connect_labels = LabelSet::from([("request_kind", "connect")]); let connect_avg = repo - .recalculate_udp_avg_connect_processing_time_ns(processing_time, &connect_labels, now) + .recalculate_udp_avg_processing_time_ns(processing_time, &connect_labels, now) .await; let announce_labels = LabelSet::from([("request_kind", "announce")]); let announce_avg = repo - .recalculate_udp_avg_announce_processing_time_ns(processing_time, &announce_labels, now) + .recalculate_udp_avg_processing_time_ns(processing_time, &announce_labels, now) .await; let scrape_labels = LabelSet::from([("request_kind", "scrape")]); let scrape_avg = repo - .recalculate_udp_avg_scrape_processing_time_ns(processing_time, &scrape_labels, now) + .recalculate_udp_avg_processing_time_ns(processing_time, &scrape_labels, now) .await; // With 0 total connections, the formula becomes 0 + (1000 - 0) / 0 @@ -535,7 +505,7 @@ mod tests { let large_duration = Duration::from_secs(1); // 1 second = 1,000,000,000 ns let connect_labels = LabelSet::from([("request_kind", "connect")]); let new_avg = repo - .recalculate_udp_avg_connect_processing_time_ns(large_duration, &connect_labels, now) + .recalculate_udp_avg_processing_time_ns(large_duration, &connect_labels, now) .await; // Should handle large numbers without overflow @@ -629,7 +599,7 @@ mod tests { // First calculation: no connections recorded yet, should result in infinity let processing_time_1 = Duration::from_nanos(2000); let avg_1 = repo - .recalculate_udp_avg_connect_processing_time_ns(processing_time_1, &connect_labels, now) + .recalculate_udp_avg_processing_time_ns(processing_time_1, &connect_labels, now) .await; assert!( @@ -647,7 +617,7 @@ mod tests { let processing_time_2 = Duration::from_nanos(3000); let connect_labels = LabelSet::from([("request_kind", 
"connect")]); let avg_2 = repo - .recalculate_udp_avg_connect_processing_time_ns(processing_time_2, &connect_labels, now) + .recalculate_udp_avg_processing_time_ns(processing_time_2, &connect_labels, now) .await; // There is one connection, so the average should be: From ed5f1e69de7fc05a87250614425b562fb7db67b9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Jun 2025 22:28:30 +0100 Subject: [PATCH 190/247] fix: [#1589] add dedicated metric for UDP request processing in moving average calculation Add a new metric `UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL` to track requests processed specifically for performance metrics, eliminating race conditions in the moving average calculation. **Changes:** - Add new metric constant `UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL` - Update `recalculate_udp_avg_processing_time_ns()` to use dedicated counter instead of accepted requests total - Add `udp_processed_requests_total()` method to retrieve the new metric value - Add `increment_udp_processed_requests_total()` helper method - Update metric descriptions to include the new counter **Problem Fixed:** Previously, the moving average calculation used the accepted requests counter that could be updated independently, causing race conditions where the same request count was used for multiple calculations. The new implementation increments its own dedicated counter atomically during the calculation, ensuring consistency. **Behavior Change:** The counter now starts at 0 and gets incremented to 1 on the first calculation call, then uses proper moving average formula for subsequent calls. This eliminates division by zero issues and provides more accurate moving averages. **Tests Updated:** Updated repository tests to reflect the new atomic behavior where the processed requests counter is managed specifically for moving average calculations. 
Fixes race conditions in UDP request processing time metrics while maintaining backward compatibility of all public APIs. --- .../src/statistics/metrics.rs | 78 ++++++++++--- .../udp-tracker-server/src/statistics/mod.rs | 10 ++ .../src/statistics/repository.rs | 106 +++++------------- 3 files changed, 103 insertions(+), 91 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index bfed16c47..e7653815f 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -9,7 +9,8 @@ use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::statistics::{ - UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, + UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, + UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, @@ -57,26 +58,22 @@ impl Metrics { label_set: &LabelSet, now: DurationSinceUnixEpoch, ) -> f64 { - let req_processing_time = req_processing_time.as_nanos() as f64; - - let request_accepted_total = self.udp_request_accepted(label_set) as f64; + self.increment_udp_processed_requests_total(label_set, now); + let processed_requests_total = self.udp_processed_requests_total(label_set) as f64; let previous_avg = self.udp_avg_processing_time_ns(label_set); + let req_processing_time = req_processing_time.as_nanos() as f64; - let new_avg = if request_accepted_total == 0.0 { - req_processing_time - } else { - // Moving average: https://en.wikipedia.org/wiki/Moving_average - previous_avg as f64 + 
(req_processing_time - previous_avg as f64) / request_accepted_total - }; + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / processed_requests_total; tracing::debug!( - "Recalculated UDP average processing time for labels {}: {} ns (previous: {} ns, req_processing_time: {} ns, request_accepted_total: {})", + "Recalculated UDP average processing time for labels {}: {} ns (previous: {} ns, req_processing_time: {} ns, request_processed_total: {})", label_set, new_avg, previous_avg, req_processing_time, - request_accepted_total + processed_requests_total ); self.update_udp_avg_processing_time_ns(new_avg, label_set, now); @@ -105,6 +102,18 @@ impl Metrics { .unwrap_or_default() as u64 } + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_processed_requests_total(&self, label_set: &LabelSet) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + label_set, + ) + .unwrap_or_default() as u64 + } + fn update_udp_avg_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { tracing::debug!( "Updating average processing time metric to {} ns for label set {}", @@ -123,6 +132,19 @@ impl Metrics { } } + fn increment_udp_processed_requests_total(&mut self, label_set: &LabelSet, now: DurationSinceUnixEpoch) { + tracing::debug!("Incrementing processed requests total for label set {}", label_set,); + + match self.increase_counter( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + label_set, + now, + ) { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increment counter: {}", err), + } + } + // UDP /// Total number of UDP (UDP tracker) requests aborted. 
#[must_use] @@ -360,9 +382,10 @@ mod tests { use super::*; use crate::statistics::{ UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, - UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, - UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, - UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, + UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, + UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, + UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, }; use crate::CurrentClock; @@ -437,6 +460,31 @@ mod tests { assert!(result.is_ok()); } + #[test] + fn it_should_return_zero_for_udp_processed_requests_total_when_no_data() { + let metrics = Metrics::default(); + let labels = LabelSet::from([("request_kind", "connect")]); + assert_eq!(metrics.udp_processed_requests_total(&labels), 0); + } + + #[test] + fn it_should_increment_processed_requests_total() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "connect")]); + + // Directly increment the counter using the public method + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + &labels, + now, + ) + .unwrap(); + + assert_eq!(metrics.udp_processed_requests_total(&labels), 1); + } + mod udp_general_metrics { use super::*; diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 768722ba3..6bd35b9a1 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -17,6 +17,8 @@ pub const UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL: &str = "udp_tracker_server pub const 
UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL: &str = "udp_tracker_server_responses_sent_total"; pub const UDP_TRACKER_SERVER_ERRORS_TOTAL: &str = "udp_tracker_server_errors_total"; pub const UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS: &str = "udp_tracker_server_performance_avg_processing_time_ns"; +pub const UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL: &str = + "udp_tracker_server_performance_avg_processed_requests_total"; #[must_use] pub fn describe_metrics() -> Metrics { @@ -76,5 +78,13 @@ pub fn describe_metrics() -> Metrics { Some(MetricDescription::new("Average time to process a UDP request in nanoseconds")), ); + metrics.metric_collection.describe_counter( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new( + "Total number of UDP requests processed for the average performance metrics", + )), + ); + metrics } diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 6695bbfbc..1ab2cc6a7 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -295,21 +295,6 @@ mod tests { let repo = Repository::new(); let now = CurrentClock::now(); - // Set up initial connections handled - let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); - let ipv6_labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")]); - - // Simulate 2 IPv4 and 1 IPv6 connections - repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) - .await - .unwrap(); - repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) - .await - .unwrap(); - repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv6_labels, now) - .await - .unwrap(); - // 
Set initial average to 1000ns let connect_labels = LabelSet::from([("request_kind", "connect")]); repo.set_gauge( @@ -322,14 +307,16 @@ mod tests { .unwrap(); // Calculate new average with processing time of 2000ns + // This will increment the processed requests counter from 0 to 1 let processing_time = Duration::from_nanos(2000); let new_avg = repo .recalculate_udp_avg_processing_time_ns(processing_time, &connect_labels, now) .await; - // Moving average: previous_avg + (new_value - previous_avg) / total_connections - // 1000 + (2000 - 1000) / 3 = 1000 + 333.33 = 1333.33 - let expected_avg = 1000.0 + (2000.0 - 1000.0) / 3.0; + // Moving average: previous_avg + (new_value - previous_avg) / processed_requests_total + // With processed_requests_total = 1 (incremented during the call): + // 1000 + (2000 - 1000) / 1 = 1000 + 1000 = 2000 + let expected_avg = 1000.0 + (2000.0 - 1000.0) / 1.0; assert!( (new_avg - expected_avg).abs() < 0.01, "Expected {expected_avg}, got {new_avg}" @@ -341,22 +328,6 @@ mod tests { let repo = Repository::new(); let now = CurrentClock::now(); - // Set up initial announces handled - let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "announce")]); - let ipv6_labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")]); - - // Simulate 3 IPv4 and 2 IPv6 announces - for _ in 0..3 { - repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) - .await - .unwrap(); - } - for _ in 0..2 { - repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv6_labels, now) - .await - .unwrap(); - } - // Set initial average to 500ns let announce_labels = LabelSet::from([("request_kind", "announce")]); repo.set_gauge( @@ -369,14 +340,16 @@ mod tests { .unwrap(); // Calculate new average with processing time of 1500ns + // This will increment the processed requests counter from 0 to 1 let processing_time = 
Duration::from_nanos(1500); let new_avg = repo .recalculate_udp_avg_processing_time_ns(processing_time, &announce_labels, now) .await; - // Moving average: previous_avg + (new_value - previous_avg) / total_announces - // 500 + (1500 - 500) / 5 = 500 + 200 = 700 - let expected_avg = 500.0 + (1500.0 - 500.0) / 5.0; + // Moving average: previous_avg + (new_value - previous_avg) / processed_requests_total + // With processed_requests_total = 1 (incremented during the call): + // 500 + (1500 - 500) / 1 = 500 + 1000 = 1500 + let expected_avg = 500.0 + (1500.0 - 500.0) / 1.0; assert!( (new_avg - expected_avg).abs() < 0.01, "Expected {expected_avg}, got {new_avg}" @@ -388,16 +361,6 @@ mod tests { let repo = Repository::new(); let now = CurrentClock::now(); - // Set up initial scrapes handled - let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")]); - - // Simulate 4 IPv4 scrapes - for _ in 0..4 { - repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) - .await - .unwrap(); - } - // Set initial average to 800ns let scrape_labels = LabelSet::from([("request_kind", "scrape")]); repo.set_gauge( @@ -410,14 +373,16 @@ mod tests { .unwrap(); // Calculate new average with processing time of 1200ns + // This will increment the processed requests counter from 0 to 1 let processing_time = Duration::from_nanos(1200); let new_avg = repo .recalculate_udp_avg_processing_time_ns(processing_time, &scrape_labels, now) .await; - // Moving average: previous_avg + (new_value - previous_avg) / total_scrapes - // 800 + (1200 - 800) / 4 = 800 + 100 = 900 - let expected_avg = 800.0 + (1200.0 - 800.0) / 4.0; + // Moving average: previous_avg + (new_value - previous_avg) / processed_requests_total + // With processed_requests_total = 1 (incremented during the call): + // 800 + (1200 - 800) / 1 = 800 + 400 = 1200 + let expected_avg = 800.0 + (1200.0 - 800.0) / 1.0; assert!( (new_avg - 
expected_avg).abs() < 0.01, "Expected {expected_avg}, got {new_avg}" @@ -584,49 +549,38 @@ mod tests { let connect_labels = LabelSet::from([("request_kind", "connect")]); let now = CurrentClock::now(); - // This test checks the behavior of `recalculate_udp_avg_connect_processing_time_ns`` - // when no connections have been recorded yet. The first call should - // handle division by zero gracefully and return an infinite average, - // which is the current behavior. + // This test checks the behavior of `recalculate_udp_avg_processing_time_ns` + // when no processed requests have been recorded yet. The first call should + // handle division by zero gracefully and set the first average to the + // processing time of the first request. - // todo: the first average should be 2000ns, not infinity. - // This is because the first connection is not counted in the average - // calculation if the counter is increased after calculating the average. - // The problem is that we count requests when they are accepted, not - // when they are processed. And we calculate the average when the - // response is sent. 
- - // First calculation: no connections recorded yet, should result in infinity + // First calculation: no processed requests recorded yet let processing_time_1 = Duration::from_nanos(2000); let avg_1 = repo .recalculate_udp_avg_processing_time_ns(processing_time_1, &connect_labels, now) .await; + // The first average should be the first processing time since processed_requests_total is 0 + // When processed_requests_total == 0.0, new_avg = req_processing_time assert!( (avg_1 - 2000.0).abs() < f64::EPSILON, "First calculation should be 2000, but got {avg_1}" ); - // Now add one connection and try again - let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); - repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) - .await - .unwrap(); - - // Second calculation: 1 connection + // Second calculation: now we have one processed request (incremented during first call) let processing_time_2 = Duration::from_nanos(3000); - let connect_labels = LabelSet::from([("request_kind", "connect")]); let avg_2 = repo .recalculate_udp_avg_processing_time_ns(processing_time_2, &connect_labels, now) .await; - // There is one connection, so the average should be: - // 2000 + (3000 - 2000) / 1 = 2000 + 1000 = 3000 - // This is because one connection is not counted yet in the average calculation, - // so the average is simply the processing time of the second connection. 
+ // Moving average calculation: previous_avg + (new_value - previous_avg) / processed_requests_total + // After first call: processed_requests_total = 1, avg = 2000 + // During second call: processed_requests_total incremented to 2 + // new_avg = 2000 + (3000 - 2000) / 2 = 2000 + 500 = 2500 + let expected_avg_2 = 2000.0 + (3000.0 - 2000.0) / 2.0; assert!( - (avg_2 - 3000.0).abs() < f64::EPSILON, - "Second calculation should be 3000ns, but got {avg_2}" + (avg_2 - expected_avg_2).abs() < f64::EPSILON, + "Second calculation should be {expected_avg_2}ns, but got {avg_2}" ); } } From 384b887fa2790413cd189c169c047f5ceebcbe4c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 07:57:48 +0100 Subject: [PATCH 191/247] feat(metrics): [#1589] add Avg (average) aggregate function Implements a new aggregate function for calculating averages of metric samples that match specific label criteria, complementing the existing Sum aggregation. - **metrics/src/metric/aggregate/avg.rs**: New metric-level average trait and implementations - `Avg` trait with `avg()` method for calculating averages - Implementation for `Metric` returning `f64` - Implementation for `Metric` returning `f64` - Comprehensive unit tests with edge cases (empty samples, large values, etc.) 
- **metrics/src/metric_collection/aggregate/avg.rs**: New collection-level average trait - `Avg` trait for `MetricCollection` and `MetricKindCollection` - Delegates to metric-level implementations - Handles mixed counter/gauge collections by trying counters first, then gauges - Returns `None` for non-existent metrics - Comprehensive test suite covering various scenarios - **metrics/src/metric/aggregate/mod.rs**: Export new `avg` module - **metrics/src/metric_collection/aggregate/mod.rs**: Export new `avg` module - **metrics/README.md**: Add example usage of the new `Avg` trait in the aggregation section - **Type Safety**: Returns appropriate types (`f64` for both counters and gauges) - **Label Filtering**: Supports filtering samples by label criteria like existing `Sum` - **Edge Case Handling**: Returns `0.0` for empty sample sets - **Performance**: Uses iterator chains for efficient sample processing - **Comprehensive Testing**: 205 tests pass including new avg functionality ```rust use torrust_tracker_metrics::metric_collection::aggregate::Avg; // Calculate average of all matching samples let avg_value = metrics.avg(&metric_name, &label_criteria); ``` The implementation follows the same patterns as the existing `Sum` aggregate function, ensuring consistency in the codebase and maintaining the same level of type safety and performance characteristics. 
--- packages/metrics/README.md | 10 +- packages/metrics/src/metric/aggregate/avg.rs | 307 ++++++++++++++++++ packages/metrics/src/metric/aggregate/mod.rs | 1 + .../src/metric_collection/aggregate/avg.rs | 214 ++++++++++++ .../src/metric_collection/aggregate/mod.rs | 1 + 5 files changed, 532 insertions(+), 1 deletion(-) create mode 100644 packages/metrics/src/metric/aggregate/avg.rs create mode 100644 packages/metrics/src/metric_collection/aggregate/avg.rs diff --git a/packages/metrics/README.md b/packages/metrics/README.md index 9f3883fba..3d1d94c5f 100644 --- a/packages/metrics/README.md +++ b/packages/metrics/README.md @@ -67,7 +67,7 @@ println!("{}", prometheus_output); ### Metric Aggregation ```rust -use torrust_tracker_metrics::metric_collection::aggregate::Sum; +use torrust_tracker_metrics::metric_collection::aggregate::{Sum, Avg}; // Sum all counter values matching specific labels let total_requests = metrics.sum( @@ -76,6 +76,14 @@ let total_requests = metrics.sum( ); println!("Total requests: {:?}", total_requests); + +// Calculate average of gauge values matching specific labels +let avg_response_time = metrics.avg( + &metric_name!("response_time_seconds"), + &[("endpoint", "/announce")].into(), +); + +println!("Average response time: {:?}", avg_response_time); ``` ## Architecture diff --git a/packages/metrics/src/metric/aggregate/avg.rs b/packages/metrics/src/metric/aggregate/avg.rs new file mode 100644 index 000000000..e1882ea68 --- /dev/null +++ b/packages/metrics/src/metric/aggregate/avg.rs @@ -0,0 +1,307 @@ +use crate::counter::Counter; +use crate::gauge::Gauge; +use crate::label::LabelSet; +use crate::metric::Metric; + +pub trait Avg { + type Output; + fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output; +} + +impl Avg for Metric { + type Output = f64; + + fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output { + let matching_samples: Vec<_> = self + .sample_collection + .iter() + .filter(|(label_set, _measurement)| 
label_set.matches(label_set_criteria)) + .collect(); + + if matching_samples.is_empty() { + return 0.0; + } + + let sum: u64 = matching_samples + .iter() + .map(|(_label_set, measurement)| measurement.value().primitive()) + .sum(); + + #[allow(clippy::cast_precision_loss)] + (sum as f64 / matching_samples.len() as f64) + } +} + +impl Avg for Metric { + type Output = f64; + + fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output { + let matching_samples: Vec<_> = self + .sample_collection + .iter() + .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) + .collect(); + + if matching_samples.is_empty() { + return 0.0; + } + + let sum: f64 = matching_samples + .iter() + .map(|(_label_set, measurement)| measurement.value().primitive()) + .sum(); + + #[allow(clippy::cast_precision_loss)] + (sum / matching_samples.len() as f64) + } +} + +#[cfg(test)] +mod tests { + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::counter::Counter; + use crate::gauge::Gauge; + use crate::label::LabelSet; + use crate::metric::aggregate::avg::Avg; + use crate::metric::{Metric, MetricName}; + use crate::metric_name; + use crate::sample::Sample; + use crate::sample_collection::SampleCollection; + + struct MetricBuilder { + sample_time: DurationSinceUnixEpoch, + name: MetricName, + samples: Vec>, + } + + impl Default for MetricBuilder { + fn default() -> Self { + Self { + sample_time: DurationSinceUnixEpoch::from_secs(1_743_552_000), + name: metric_name!("test_metric"), + samples: vec![], + } + } + } + + impl MetricBuilder { + fn with_sample(mut self, value: T, label_set: &LabelSet) -> Self { + let sample = Sample::new(value, self.sample_time, label_set.clone()); + self.samples.push(sample); + self + } + + fn build(self) -> Metric { + Metric::new( + self.name, + None, + None, + SampleCollection::new(self.samples).expect("invalid samples"), + ) + } + } + + fn counter_cases() -> Vec<(Metric, LabelSet, f64)> { + // (metric, label set criteria, 
expected_average_value) + vec![ + // Metric with one sample without label set + ( + MetricBuilder::default().with_sample(1.into(), &LabelSet::empty()).build(), + LabelSet::empty(), + 1.0, + ), + // Metric with one sample with a label set + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0, + ), + // Metric with two samples, different label sets, average all + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .with_sample(3.into(), &[("l2", "l2_value")].into()) + .build(), + LabelSet::empty(), + 2.0, // (1 + 3) / 2 = 2.0 + ), + // Metric with two samples, different label sets, average one + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .with_sample(2.into(), &[("l2", "l2_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0, + ), + // Metric with three samples, same label key, different label values, average by key + ( + MetricBuilder::default() + .with_sample(2.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(4.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .with_sample(6.into(), &[("l1", "l1_value"), ("lc", "lc_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 4.0, // (2 + 4 + 6) / 3 = 4.0 + ), + // Metric with two samples, different label values, average by subkey + ( + MetricBuilder::default() + .with_sample(5.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(7.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("la", "la_value")].into(), + 5.0, + ), + // Edge: Metric with no samples at all + (MetricBuilder::default().build(), LabelSet::empty(), 0.0), + // Edge: Metric with samples but no matching labels + ( + MetricBuilder::default() + .with_sample(5.into(), &[("foo", "bar")].into()) + .build(), + [("not", "present")].into(), + 0.0, + ), + // Edge: Metric with zero value + ( + MetricBuilder::default() 
+ .with_sample(0.into(), &[("l3", "l3_value")].into()) + .build(), + [("l3", "l3_value")].into(), + 0.0, + ), + // Edge: Metric with a very large value + ( + MetricBuilder::default() + .with_sample((u64::MAX / 2).into(), &[("edge", "large1")].into()) + .with_sample((u64::MAX / 2).into(), &[("edge", "large2")].into()) + .build(), + LabelSet::empty(), + #[allow(clippy::cast_precision_loss)] + (u64::MAX as f64 / 2.0), // Average of (max/2) and (max/2) + ), + ] + } + + fn gauge_cases() -> Vec<(Metric, LabelSet, f64)> { + // (metric, label set criteria, expected_average_value) + vec![ + // Metric with one sample without label set + ( + MetricBuilder::default().with_sample(1.0.into(), &LabelSet::empty()).build(), + LabelSet::empty(), + 1.0, + ), + // Metric with one sample with a label set + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0, + ), + // Metric with two samples, different label sets, average all + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .with_sample(3.0.into(), &[("l2", "l2_value")].into()) + .build(), + LabelSet::empty(), + 2.0, // (1.0 + 3.0) / 2 = 2.0 + ), + // Metric with two samples, different label sets, average one + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .with_sample(2.0.into(), &[("l2", "l2_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0, + ), + // Metric with three samples, same label key, different label values, average by key + ( + MetricBuilder::default() + .with_sample(2.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(4.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .with_sample(6.0.into(), &[("l1", "l1_value"), ("lc", "lc_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 4.0, // (2.0 + 4.0 + 6.0) / 3 = 4.0 + ), + // Metric with two samples, different label values, average by subkey + ( + 
MetricBuilder::default() + .with_sample(5.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(7.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("la", "la_value")].into(), + 5.0, + ), + // Edge: Metric with no samples at all + (MetricBuilder::default().build(), LabelSet::empty(), 0.0), + // Edge: Metric with samples but no matching labels + ( + MetricBuilder::default() + .with_sample(5.0.into(), &[("foo", "bar")].into()) + .build(), + [("not", "present")].into(), + 0.0, + ), + // Edge: Metric with zero value + ( + MetricBuilder::default() + .with_sample(0.0.into(), &[("l3", "l3_value")].into()) + .build(), + [("l3", "l3_value")].into(), + 0.0, + ), + // Edge: Metric with negative values + ( + MetricBuilder::default() + .with_sample((-2.0).into(), &[("l4", "l4_value")].into()) + .with_sample(4.0.into(), &[("l5", "l5_value")].into()) + .build(), + LabelSet::empty(), + 1.0, // (-2.0 + 4.0) / 2 = 1.0 + ), + // Edge: Metric with decimal values + ( + MetricBuilder::default() + .with_sample(1.5.into(), &[("l6", "l6_value")].into()) + .with_sample(2.5.into(), &[("l7", "l7_value")].into()) + .build(), + LabelSet::empty(), + 2.0, // (1.5 + 2.5) / 2 = 2.0 + ), + ] + } + + #[test] + fn test_counter_cases() { + for (idx, (metric, criteria, expected_value)) in counter_cases().iter().enumerate() { + let avg = metric.avg(criteria); + + assert!( + (avg - expected_value).abs() <= f64::EPSILON, + "at case {idx}, expected avg to be {expected_value}, got {avg}" + ); + } + } + + #[test] + fn test_gauge_cases() { + for (idx, (metric, criteria, expected_value)) in gauge_cases().iter().enumerate() { + let avg = metric.avg(criteria); + + assert!( + (avg - expected_value).abs() <= f64::EPSILON, + "at case {idx}, expected avg to be {expected_value}, got {avg}" + ); + } + } +} diff --git a/packages/metrics/src/metric/aggregate/mod.rs b/packages/metrics/src/metric/aggregate/mod.rs index dce785d95..1224a1f52 100644 --- 
a/packages/metrics/src/metric/aggregate/mod.rs +++ b/packages/metrics/src/metric/aggregate/mod.rs @@ -1 +1,2 @@ +pub mod avg; pub mod sum; diff --git a/packages/metrics/src/metric_collection/aggregate/avg.rs b/packages/metrics/src/metric_collection/aggregate/avg.rs new file mode 100644 index 000000000..936754fc4 --- /dev/null +++ b/packages/metrics/src/metric_collection/aggregate/avg.rs @@ -0,0 +1,214 @@ +use crate::counter::Counter; +use crate::gauge::Gauge; +use crate::label::LabelSet; +use crate::metric::aggregate::avg::Avg as MetricAvgTrait; +use crate::metric::MetricName; +use crate::metric_collection::{MetricCollection, MetricKindCollection}; + +pub trait Avg { + fn avg(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option; +} + +impl Avg for MetricCollection { + fn avg(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { + if let Some(value) = self.counters.avg(metric_name, label_set_criteria) { + return Some(value); + } + + if let Some(value) = self.gauges.avg(metric_name, label_set_criteria) { + return Some(value); + } + + None + } +} + +impl Avg for MetricKindCollection { + fn avg(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { + self.metrics + .get(metric_name) + .map(|metric| metric.avg(label_set_criteria)) + } +} + +impl Avg for MetricKindCollection { + fn avg(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { + self.metrics.get(metric_name).map(|metric| metric.avg(label_set_criteria)) + } +} + +#[cfg(test)] +mod tests { + + mod it_should_allow_averaging_all_metric_samples_containing_some_given_labels { + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::label::LabelValue; + use crate::label_name; + use crate::metric_collection::aggregate::avg::Avg; + + #[test] + fn type_counter_with_two_samples() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = 
metric_name!("test_counter"); + + let mut collection = MetricCollection::default(); + + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + // Two samples with value 1 each, average should be 1.0 + assert_eq!(collection.avg(&metric_name, &LabelSet::empty()), Some(1.0)); + assert_eq!( + collection.avg(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(1.0) + ); + } + + #[test] + fn type_counter_with_different_values() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_counter"); + + let mut collection = MetricCollection::default(); + + // First increment: value goes from 0 to 1 + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + // Second increment on the same label: value goes from 1 to 2 + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + DurationSinceUnixEpoch::from_secs(2), + ) + .unwrap(); + + // Create another counter with a different value + collection + .set_counter( + &metric_name!("test_counter"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + 4, + DurationSinceUnixEpoch::from_secs(3), + ) + .unwrap(); + + // Average of 2 and 4 should be 3.0 + assert_eq!(collection.avg(&metric_name, &LabelSet::empty()), Some(3.0)); + assert_eq!( + collection.avg(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(2.0) + ); + assert_eq!( + 
collection.avg(&metric_name, &(label_name!("label_2"), LabelValue::new("value_2")).into()), + Some(4.0) + ); + } + + #[test] + fn type_gauge_with_two_samples() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_gauge"); + + let mut collection = MetricCollection::default(); + + collection + .set_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + 2.0, + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + collection + .set_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + 4.0, + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + // Average of 2.0 and 4.0 should be 3.0 + assert_eq!(collection.avg(&metric_name, &LabelSet::empty()), Some(3.0)); + assert_eq!( + collection.avg(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(2.0) + ); + } + + #[test] + fn type_gauge_with_negative_values() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_gauge"); + + let mut collection = MetricCollection::default(); + + collection + .set_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + -2.0, + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + collection + .set_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + 6.0, + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + // Average of -2.0 and 6.0 should be 2.0 + assert_eq!(collection.avg(&metric_name, &LabelSet::empty()), Some(2.0)); + } + + #[test] + fn nonexistent_metric() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let collection = MetricCollection::default(); + + 
assert_eq!(collection.avg(&metric_name!("nonexistent"), &LabelSet::empty()), None); + } + } +} diff --git a/packages/metrics/src/metric_collection/aggregate/mod.rs b/packages/metrics/src/metric_collection/aggregate/mod.rs index dce785d95..1224a1f52 100644 --- a/packages/metrics/src/metric_collection/aggregate/mod.rs +++ b/packages/metrics/src/metric_collection/aggregate/mod.rs @@ -1 +1,2 @@ +pub mod avg; pub mod sum; From 8fbcf9024a39af498162a522ecbd107d01f239a4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 08:30:27 +0100 Subject: [PATCH 192/247] refactor(metrics): extract collect_matching_samples to Metric impl Improve AI-generated code. Moves the collect_matching_samples helper method from individual aggregate implementations to the generic Metric implementation, making it reusable across all aggregate functions. - Add collect_matching_samples method to Metric for filtering samples by label criteria - Remove code duplication between Sum and Avg aggregate implementations - Improve code organization by centralizing sample collection logic - Maintain backward compatibility and all existing functionality This refactoring improves maintainability by providing a single, well-tested implementation of sample filtering that can be used by current and future aggregate functions. 
--- packages/metrics/src/metric/aggregate/avg.rs | 23 +++++--------------- packages/metrics/src/metric/mod.rs | 11 ++++++++++ 2 files changed, 16 insertions(+), 18 deletions(-) diff --git a/packages/metrics/src/metric/aggregate/avg.rs b/packages/metrics/src/metric/aggregate/avg.rs index e1882ea68..95628450b 100644 --- a/packages/metrics/src/metric/aggregate/avg.rs +++ b/packages/metrics/src/metric/aggregate/avg.rs @@ -1,6 +1,7 @@ use crate::counter::Counter; use crate::gauge::Gauge; use crate::label::LabelSet; +use crate::metric::aggregate::sum::Sum; use crate::metric::Metric; pub trait Avg { @@ -12,20 +13,13 @@ impl Avg for Metric { type Output = f64; fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output { - let matching_samples: Vec<_> = self - .sample_collection - .iter() - .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) - .collect(); + let matching_samples = self.collect_matching_samples(label_set_criteria); if matching_samples.is_empty() { return 0.0; } - let sum: u64 = matching_samples - .iter() - .map(|(_label_set, measurement)| measurement.value().primitive()) - .sum(); + let sum = self.sum(label_set_criteria); #[allow(clippy::cast_precision_loss)] (sum as f64 / matching_samples.len() as f64) @@ -36,20 +30,13 @@ impl Avg for Metric { type Output = f64; fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output { - let matching_samples: Vec<_> = self - .sample_collection - .iter() - .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) - .collect(); + let matching_samples = self.collect_matching_samples(label_set_criteria); if matching_samples.is_empty() { return 0.0; } - let sum: f64 = matching_samples - .iter() - .map(|(_label_set, measurement)| measurement.value().primitive()) - .sum(); + let sum = self.sum(label_set_criteria); #[allow(clippy::cast_precision_loss)] (sum / matching_samples.len() as f64) diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 
d1aa01b94..6bc1a6075 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -78,6 +78,17 @@ impl Metric { pub fn is_empty(&self) -> bool { self.sample_collection.is_empty() } + + #[must_use] + pub fn collect_matching_samples( + &self, + label_set_criteria: &LabelSet, + ) -> Vec<(&crate::label::LabelSet, &crate::sample::Measurement)> { + self.sample_collection + .iter() + .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) + .collect() + } } impl Metric { From f402b0250b846dfb62c8d8cb48ec5b175693f350 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 08:32:43 +0100 Subject: [PATCH 193/247] chore: remove deprecated comment --- .../rest-tracker-api-core/src/statistics/services.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index a8132d4fd..af79c5ce7 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -59,18 +59,6 @@ async fn get_protocol_metrics( let http_stats = http_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; - /* - - todo: We have to delete the global metrics from Metric types: - - - bittorrent_http_tracker_core::statistics::metrics::Metrics - - bittorrent_udp_tracker_core::statistics::metrics::Metrics - - torrust_udp_tracker_server::statistics::metrics::Metrics - - Internally only the labeled metrics should be used. - - */ - // TCPv4 let tcp4_announces_handled = http_stats.tcp4_announces_handled(); From caa69ae91356584e193b11485548fb935bb4f2d3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 08:33:43 +0100 Subject: [PATCH 194/247] test: [#1589] remove unneeded test Division by zero issue was solved. It can't happen now because we increase the counter at the beginning of the function. 
```rust #[allow(clippy::cast_precision_loss)] pub fn recalculate_udp_avg_processing_time_ns( &mut self, req_processing_time: Duration, label_set: &LabelSet, now: DurationSinceUnixEpoch, ) -> f64 { self.increment_udp_processed_requests_total(label_set, now); let processed_requests_total = self.udp_processed_requests_total(label_set) as f64; let previous_avg = self.udp_avg_processing_time_ns(label_set); let req_processing_time = req_processing_time.as_nanos() as f64; // Moving average: https://en.wikipedia.org/wiki/Moving_average let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / processed_requests_total; tracing::debug!( "Recalculated UDP average processing time for labels {}: {} ns (previous: {} ns, req_processing_time: {} ns, request_processed_total: {})", label_set, new_avg, previous_avg, req_processing_time, processed_requests_total ); self.update_udp_avg_processing_time_ns(new_avg, label_set, now); new_avg } ``` --- .../src/statistics/repository.rs | 41 ------------------- 1 file changed, 41 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 1ab2cc6a7..85e3bbe64 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -542,45 +542,4 @@ mod tests { // Should handle NaN values assert!(result.is_ok()); } - - #[tokio::test] - async fn it_should_handle_moving_average_calculation_before_any_connections_are_recorded() { - let repo = Repository::new(); - let connect_labels = LabelSet::from([("request_kind", "connect")]); - let now = CurrentClock::now(); - - // This test checks the behavior of `recalculate_udp_avg_processing_time_ns` - // when no processed requests have been recorded yet. The first call should - // handle division by zero gracefully and set the first average to the - // processing time of the first request. 
- - // First calculation: no processed requests recorded yet - let processing_time_1 = Duration::from_nanos(2000); - let avg_1 = repo - .recalculate_udp_avg_processing_time_ns(processing_time_1, &connect_labels, now) - .await; - - // The first average should be the first processing time since processed_requests_total is 0 - // When processed_requests_total == 0.0, new_avg = req_processing_time - assert!( - (avg_1 - 2000.0).abs() < f64::EPSILON, - "First calculation should be 2000, but got {avg_1}" - ); - - // Second calculation: now we have one processed request (incremented during first call) - let processing_time_2 = Duration::from_nanos(3000); - let avg_2 = repo - .recalculate_udp_avg_processing_time_ns(processing_time_2, &connect_labels, now) - .await; - - // Moving average calculation: previous_avg + (new_value - previous_avg) / processed_requests_total - // After first call: processed_requests_total = 1, avg = 2000 - // During second call: processed_requests_total incremented to 2 - // new_avg = 2000 + (3000 - 2000) / 2 = 2000 + 500 = 2500 - let expected_avg_2 = 2000.0 + (3000.0 - 2000.0) / 2.0; - assert!( - (avg_2 - expected_avg_2).abs() < f64::EPSILON, - "Second calculation should be {expected_avg_2}ns, but got {avg_2}" - ); - } } From ba3d8a914e3dabe7c17c24e2a1258a35fa87199e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 10:10:21 +0100 Subject: [PATCH 195/247] fix: format --- packages/metrics/src/metric_collection/aggregate/avg.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packages/metrics/src/metric_collection/aggregate/avg.rs b/packages/metrics/src/metric_collection/aggregate/avg.rs index 936754fc4..0aef4e325 100644 --- a/packages/metrics/src/metric_collection/aggregate/avg.rs +++ b/packages/metrics/src/metric_collection/aggregate/avg.rs @@ -25,9 +25,7 @@ impl Avg for MetricCollection { impl Avg for MetricKindCollection { fn avg(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { - 
self.metrics - .get(metric_name) - .map(|metric| metric.avg(label_set_criteria)) + self.metrics.get(metric_name).map(|metric| metric.avg(label_set_criteria)) } } From cd57f7a78f423d9ae409fd3aa63f7fc7a517375d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 10:23:58 +0100 Subject: [PATCH 196/247] fix: [#1589] use average aggregation for UDP processing time metrics When calculating aggregated values for processing time metrics across multiple servers, we need to use the average (.avg()) instead of sum (.sum()) because the metric samples are already averages per server. Using sum() on pre-averaged values would produce incorrect results, as it would add up the averages rather than computing the true average across all servers. Changes: - Add new *_averaged() methods that use .avg() for proper aggregation - Update services.rs to use the corrected averaging methods - Import Avg trait for metric collection averaging functionality Fixes incorrect metric aggregation for: - udp_avg_connect_processing_time_ns - udp_avg_announce_processing_time_ns - udp_avg_scrape_processing_time_ns" --- .../src/statistics/services.rs | 6 +- .../src/statistics/metrics.rs | 267 ++++++++++++++++++ 2 files changed, 270 insertions(+), 3 deletions(-) diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index af79c5ce7..a1edae46a 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -74,9 +74,9 @@ async fn get_protocol_metrics( let udp_requests_aborted = udp_server_stats.udp_requests_aborted(); let udp_requests_banned = udp_server_stats.udp_requests_banned(); let udp_banned_ips_total = udp_server_stats.udp_banned_ips_total(); - let udp_avg_connect_processing_time_ns = udp_server_stats.udp_avg_connect_processing_time_ns(); - let udp_avg_announce_processing_time_ns = udp_server_stats.udp_avg_announce_processing_time_ns(); - 
let udp_avg_scrape_processing_time_ns = udp_server_stats.udp_avg_scrape_processing_time_ns(); + let udp_avg_connect_processing_time_ns = udp_server_stats.udp_avg_connect_processing_time_ns_averaged(); + let udp_avg_announce_processing_time_ns = udp_server_stats.udp_avg_announce_processing_time_ns_averaged(); + let udp_avg_scrape_processing_time_ns = udp_server_stats.udp_avg_scrape_processing_time_ns_averaged(); // UDPv4 diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index e7653815f..ac9540f8e 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -3,6 +3,7 @@ use std::time::Duration; use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::aggregate::avg::Avg; use torrust_tracker_metrics::metric_collection::aggregate::sum::Sum; use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; use torrust_tracker_metrics::metric_name; @@ -215,6 +216,48 @@ impl Metrics { .unwrap_or_default() as u64 } + /// Average processing time for UDP connect requests across all servers (in nanoseconds). + /// This calculates the average of all gauge samples for connect requests. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_connect_processing_time_ns_averaged(&self) -> u64 { + self.metric_collection + .avg( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "connect")].into(), + ) + .unwrap_or(0.0) as u64 + } + + /// Average processing time for UDP announce requests across all servers (in nanoseconds). + /// This calculates the average of all gauge samples for announce requests. 
+ #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_announce_processing_time_ns_averaged(&self) -> u64 { + self.metric_collection + .avg( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "announce")].into(), + ) + .unwrap_or(0.0) as u64 + } + + /// Average processing time for UDP scrape requests across all servers (in nanoseconds). + /// This calculates the average of all gauge samples for scrape requests. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_scrape_processing_time_ns_averaged(&self) -> u64 { + self.metric_collection + .avg( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "scrape")].into(), + ) + .unwrap_or(0.0) as u64 + } + // UDPv4 /// Total number of UDP (UDP tracker) requests from IPv4 peers. #[must_use] @@ -1179,4 +1222,228 @@ mod tests { assert!(result.is_ok()); } } + + mod averaged_processing_time_metrics { + use super::*; + + #[test] + fn it_should_return_zero_for_udp_avg_connect_processing_time_ns_averaged_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 0); + } + + #[test] + fn it_should_return_averaged_value_for_udp_avg_connect_processing_time_ns_averaged() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels1 = LabelSet::from([("request_kind", "connect"), ("server_id", "server1")]); + let labels2 = LabelSet::from([("request_kind", "connect"), ("server_id", "server2")]); + + // Set different gauge values for connect requests from different servers + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels1, + 1000.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels2, + 2000.0, + now, + ) + .unwrap(); + + 
// Should return the average: (1000 + 2000) / 2 = 1500 + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 1500); + } + + #[test] + fn it_should_return_zero_for_udp_avg_announce_processing_time_ns_averaged_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_announce_processing_time_ns_averaged(), 0); + } + + #[test] + fn it_should_return_averaged_value_for_udp_avg_announce_processing_time_ns_averaged() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels1 = LabelSet::from([("request_kind", "announce"), ("server_id", "server1")]); + let labels2 = LabelSet::from([("request_kind", "announce"), ("server_id", "server2")]); + let labels3 = LabelSet::from([("request_kind", "announce"), ("server_id", "server3")]); + + // Set different gauge values for announce requests from different servers + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels1, + 1500.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels2, + 2500.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels3, + 3000.0, + now, + ) + .unwrap(); + + // Should return the average: (1500 + 2500 + 3000) / 3 = 2333 (truncated) + assert_eq!(metrics.udp_avg_announce_processing_time_ns_averaged(), 2333); + } + + #[test] + fn it_should_return_zero_for_udp_avg_scrape_processing_time_ns_averaged_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_scrape_processing_time_ns_averaged(), 0); + } + + #[test] + fn it_should_return_averaged_value_for_udp_avg_scrape_processing_time_ns_averaged() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels1 = LabelSet::from([("request_kind", "scrape"), ("server_id", "server1")]); + let labels2 = LabelSet::from([("request_kind", "scrape"), 
("server_id", "server2")]); + + // Set different gauge values for scrape requests from different servers + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels1, + 500.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels2, + 1500.0, + now, + ) + .unwrap(); + + // Should return the average: (500 + 1500) / 2 = 1000 + assert_eq!(metrics.udp_avg_scrape_processing_time_ns_averaged(), 1000); + } + + #[test] + fn it_should_handle_fractional_averages_with_truncation() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels1 = LabelSet::from([("request_kind", "connect"), ("server_id", "server1")]); + let labels2 = LabelSet::from([("request_kind", "connect"), ("server_id", "server2")]); + let labels3 = LabelSet::from([("request_kind", "connect"), ("server_id", "server3")]); + + // Set values that will result in a fractional average + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels1, + 1000.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels2, + 1001.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels3, + 1001.0, + now, + ) + .unwrap(); + + // Should return the average: (1000 + 1001 + 1001) / 3 = 1000.666... 
→ 1000 (truncated) + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 1000); + } + + #[test] + fn it_should_only_average_matching_request_kinds() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + // Set values for different request kinds with the same server_id + let connect_labels = LabelSet::from([("request_kind", "connect"), ("server_id", "server1")]); + let announce_labels = LabelSet::from([("request_kind", "announce"), ("server_id", "server1")]); + let scrape_labels = LabelSet::from([("request_kind", "scrape"), ("server_id", "server1")]); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &connect_labels, + 1000.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &announce_labels, + 2000.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &scrape_labels, + 3000.0, + now, + ) + .unwrap(); + + // Each function should only return the value for its specific request kind + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 1000); + assert_eq!(metrics.udp_avg_announce_processing_time_ns_averaged(), 2000); + assert_eq!(metrics.udp_avg_scrape_processing_time_ns_averaged(), 3000); + } + + #[test] + fn it_should_handle_single_server_averaged_metrics() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "connect"), ("server_id", "single_server")]); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels, + 1234.0, + now, + ) + .unwrap(); + + // With only one server, the average should be the same as the single value + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 1234); + } + } } From 4c082faefe1ae5932cca4a7f44b0619a14a50a11 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: 
Fri, 20 Jun 2025 10:43:01 +0100 Subject: [PATCH 197/247] refactor: [#1589] make methods private --- packages/udp-tracker-server/src/statistics/metrics.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index ac9540f8e..178855377 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -85,7 +85,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp_avg_processing_time_ns(&self, label_set: &LabelSet) -> u64 { + fn udp_avg_processing_time_ns(&self, label_set: &LabelSet) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), @@ -106,7 +106,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp_processed_requests_total(&self, label_set: &LabelSet) -> u64 { + fn udp_processed_requests_total(&self, label_set: &LabelSet) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), From a9acca5e73d6897c671117eae63c7e28f3e1629b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 11:24:54 +0100 Subject: [PATCH 198/247] refactor: [#1589] rename methods and remove unused code --- .../src/statistics/services.rs | 28 +- .../src/statistics/event/handler/error.rs | 2 +- .../event/handler/request_aborted.rs | 4 +- .../event/handler/request_accepted.rs | 12 +- .../event/handler/request_banned.rs | 4 +- .../event/handler/request_received.rs | 2 +- .../statistics/event/handler/response_sent.rs | 4 +- .../src/statistics/metrics.rs | 246 ++++-------------- .../src/statistics/repository.rs | 44 +++- .../tests/server/contract.rs | 4 +- 10 files changed, 119 insertions(+), 231 deletions(-) diff --git 
a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index a1edae46a..f87cb8c76 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -71,8 +71,8 @@ async fn get_protocol_metrics( // UDP - let udp_requests_aborted = udp_server_stats.udp_requests_aborted(); - let udp_requests_banned = udp_server_stats.udp_requests_banned(); + let udp_requests_aborted = udp_server_stats.udp_requests_aborted_total(); + let udp_requests_banned = udp_server_stats.udp_requests_banned_total(); let udp_banned_ips_total = udp_server_stats.udp_banned_ips_total(); let udp_avg_connect_processing_time_ns = udp_server_stats.udp_avg_connect_processing_time_ns_averaged(); let udp_avg_announce_processing_time_ns = udp_server_stats.udp_avg_announce_processing_time_ns_averaged(); @@ -80,21 +80,21 @@ async fn get_protocol_metrics( // UDPv4 - let udp4_requests = udp_server_stats.udp4_requests(); - let udp4_connections_handled = udp_server_stats.udp4_connections_handled(); - let udp4_announces_handled = udp_server_stats.udp4_announces_handled(); - let udp4_scrapes_handled = udp_server_stats.udp4_scrapes_handled(); - let udp4_responses = udp_server_stats.udp4_responses(); - let udp4_errors_handled = udp_server_stats.udp4_errors_handled(); + let udp4_requests = udp_server_stats.udp4_requests_received_total(); + let udp4_connections_handled = udp_server_stats.udp4_connect_requests_accepted_total(); + let udp4_announces_handled = udp_server_stats.udp4_announce_requests_accepted_total(); + let udp4_scrapes_handled = udp_server_stats.udp4_scrape_requests_accepted_total(); + let udp4_responses = udp_server_stats.udp4_responses_sent_total(); + let udp4_errors_handled = udp_server_stats.udp4_errors_total(); // UDPv6 - let udp6_requests = udp_server_stats.udp6_requests(); - let udp6_connections_handled = udp_server_stats.udp6_connections_handled(); - let 
udp6_announces_handled = udp_server_stats.udp6_announces_handled(); - let udp6_scrapes_handled = udp_server_stats.udp6_scrapes_handled(); - let udp6_responses = udp_server_stats.udp6_responses(); - let udp6_errors_handled = udp_server_stats.udp6_errors_handled(); + let udp6_requests = udp_server_stats.udp6_requests_received_total(); + let udp6_connections_handled = udp_server_stats.udp6_connect_requests_accepted_total(); + let udp6_announces_handled = udp_server_stats.udp6_announce_requests_accepted_total(); + let udp6_scrapes_handled = udp_server_stats.udp6_scrape_requests_accepted_total(); + let udp6_responses = udp_server_stats.udp6_responses_sent_total(); + let udp6_errors_handled = udp_server_stats.udp6_errors_total(); // For backward compatibility we keep the `tcp4_connections_handled` and // `tcp6_connections_handled` metrics. They don't make sense for the HTTP diff --git a/packages/udp-tracker-server/src/statistics/event/handler/error.rs b/packages/udp-tracker-server/src/statistics/event/handler/error.rs index d83a0584d..63e480ca5 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/error.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/error.rs @@ -137,6 +137,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_errors_handled(), 1); + assert_eq!(stats.udp4_errors_total(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs index 19e410d5e..f340fe51a 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs @@ -54,7 +54,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_aborted(), 1); + assert_eq!(stats.udp_requests_aborted_total(), 1); } #[tokio::test] @@ -77,6 +77,6 @@ mod tests { ) .await; let stats = 
stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_aborted(), 1); + assert_eq!(stats.udp_requests_aborted_total(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs index af92636df..33971926e 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs @@ -61,7 +61,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_connections_handled(), 1); + assert_eq!(stats.udp4_connect_requests_accepted_total(), 1); } #[tokio::test] @@ -89,7 +89,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_announces_handled(), 1); + assert_eq!(stats.udp4_announce_requests_accepted_total(), 1); } #[tokio::test] @@ -115,7 +115,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_scrapes_handled(), 1); + assert_eq!(stats.udp4_scrape_requests_accepted_total(), 1); } #[tokio::test] @@ -141,7 +141,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_connections_handled(), 1); + assert_eq!(stats.udp6_connect_requests_accepted_total(), 1); } #[tokio::test] @@ -169,7 +169,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_announces_handled(), 1); + assert_eq!(stats.udp6_announce_requests_accepted_total(), 1); } #[tokio::test] @@ -195,6 +195,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_scrapes_handled(), 1); + assert_eq!(stats.udp6_scrape_requests_accepted_total(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs index 8badfa137..10f6cad88 100644 --- 
a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs @@ -54,7 +54,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_banned(), 1); + assert_eq!(stats.udp_requests_banned_total(), 1); } #[tokio::test] @@ -77,6 +77,6 @@ mod tests { ) .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_banned(), 1); + assert_eq!(stats.udp_requests_banned_total(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs index eced5a215..148b9d8da 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs @@ -54,6 +54,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_requests(), 1); + assert_eq!(stats.udp4_requests_received_total(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index 34093f511..b1a046b5b 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -105,7 +105,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_responses(), 1); + assert_eq!(stats.udp4_responses_sent_total(), 1); } #[tokio::test] @@ -136,6 +136,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_responses(), 1); + assert_eq!(stats.udp6_responses_sent_total(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index 178855377..e167dc5ae 100644 --- 
a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -97,7 +97,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp_request_accepted(&self, label_set: &LabelSet) -> u64 { + pub fn udp_request_accepted_total(&self, label_set: &LabelSet) -> u64 { self.metric_collection .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), label_set) .unwrap_or_default() as u64 @@ -151,7 +151,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp_requests_aborted(&self) -> u64 { + pub fn udp_requests_aborted_total(&self) -> u64 { self.metric_collection .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &LabelSet::empty()) .unwrap_or_default() as u64 @@ -161,7 +161,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp_requests_banned(&self) -> u64 { + pub fn udp_requests_banned_total(&self) -> u64 { self.metric_collection .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &LabelSet::empty()) .unwrap_or_default() as u64 @@ -177,45 +177,6 @@ impl Metrics { .unwrap_or_default() as u64 } - /// Average rounded time spent processing UDP connect requests. - #[must_use] - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - pub fn udp_avg_connect_processing_time_ns(&self) -> u64 { - self.metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &[("request_kind", "connect")].into(), - ) - .unwrap_or_default() as u64 - } - - /// Average rounded time spent processing UDP announce requests. 
- #[must_use] - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - pub fn udp_avg_announce_processing_time_ns(&self) -> u64 { - self.metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &[("request_kind", "announce")].into(), - ) - .unwrap_or_default() as u64 - } - - /// Average rounded time spent processing UDP scrape requests. - #[must_use] - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - pub fn udp_avg_scrape_processing_time_ns(&self) -> u64 { - self.metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &[("request_kind", "scrape")].into(), - ) - .unwrap_or_default() as u64 - } - /// Average processing time for UDP connect requests across all servers (in nanoseconds). /// This calculates the average of all gauge samples for connect requests. #[must_use] @@ -263,7 +224,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp4_requests(&self) -> u64 { + pub fn udp4_requests_received_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), @@ -276,7 +237,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp4_connections_handled(&self) -> u64 { + pub fn udp4_connect_requests_accepted_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), @@ -289,7 +250,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp4_announces_handled(&self) -> u64 { + pub fn udp4_announce_requests_accepted_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), @@ -302,7 +263,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub 
fn udp4_scrapes_handled(&self) -> u64 { + pub fn udp4_scrape_requests_accepted_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), @@ -315,7 +276,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp4_responses(&self) -> u64 { + pub fn udp4_responses_sent_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), @@ -328,7 +289,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp4_errors_handled(&self) -> u64 { + pub fn udp4_errors_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), @@ -342,7 +303,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp6_requests(&self) -> u64 { + pub fn udp6_requests_received_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), @@ -355,7 +316,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp6_connections_handled(&self) -> u64 { + pub fn udp6_connect_requests_accepted_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), @@ -368,7 +329,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp6_announces_handled(&self) -> u64 { + pub fn udp6_announce_requests_accepted_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), @@ -381,7 +342,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp6_scrapes_handled(&self) -> u64 { + pub fn udp6_scrape_requests_accepted_total(&self) -> u64 { self.metric_collection .sum( 
&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), @@ -394,7 +355,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp6_responses(&self) -> u64 { + pub fn udp6_responses_sent_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), @@ -407,7 +368,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp6_errors_handled(&self) -> u64 { + pub fn udp6_errors_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), @@ -534,7 +495,7 @@ mod tests { #[test] fn it_should_return_zero_for_udp_requests_aborted_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp_requests_aborted(), 0); + assert_eq!(metrics.udp_requests_aborted_total(), 0); } #[test] @@ -550,13 +511,13 @@ mod tests { .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) .unwrap(); - assert_eq!(metrics.udp_requests_aborted(), 2); + assert_eq!(metrics.udp_requests_aborted_total(), 2); } #[test] fn it_should_return_zero_for_udp_requests_banned_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp_requests_banned(), 0); + assert_eq!(metrics.udp_requests_banned_total(), 0); } #[test] @@ -571,7 +532,7 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp_requests_banned(), 3); + assert_eq!(metrics.udp_requests_banned_total(), 3); } #[test] @@ -594,89 +555,13 @@ mod tests { } } - mod udp_performance_metrics { - use super::*; - - #[test] - fn it_should_return_zero_for_udp_avg_connect_processing_time_ns_when_no_data() { - let metrics = Metrics::default(); - assert_eq!(metrics.udp_avg_connect_processing_time_ns(), 0); - } - - #[test] - fn it_should_return_gauge_value_for_udp_avg_connect_processing_time_ns() { - let mut metrics = Metrics::default(); - let now = CurrentClock::now(); - let labels = 
LabelSet::from([("request_kind", "connect")]); - - metrics - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &labels, - 1500.0, - now, - ) - .unwrap(); - - assert_eq!(metrics.udp_avg_connect_processing_time_ns(), 1500); - } - - #[test] - fn it_should_return_zero_for_udp_avg_announce_processing_time_ns_when_no_data() { - let metrics = Metrics::default(); - assert_eq!(metrics.udp_avg_announce_processing_time_ns(), 0); - } - - #[test] - fn it_should_return_gauge_value_for_udp_avg_announce_processing_time_ns() { - let mut metrics = Metrics::default(); - let now = CurrentClock::now(); - let labels = LabelSet::from([("request_kind", "announce")]); - - metrics - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &labels, - 2500.0, - now, - ) - .unwrap(); - - assert_eq!(metrics.udp_avg_announce_processing_time_ns(), 2500); - } - - #[test] - fn it_should_return_zero_for_udp_avg_scrape_processing_time_ns_when_no_data() { - let metrics = Metrics::default(); - assert_eq!(metrics.udp_avg_scrape_processing_time_ns(), 0); - } - - #[test] - fn it_should_return_gauge_value_for_udp_avg_scrape_processing_time_ns() { - let mut metrics = Metrics::default(); - let now = CurrentClock::now(); - let labels = LabelSet::from([("request_kind", "scrape")]); - - metrics - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &labels, - 3500.0, - now, - ) - .unwrap(); - - assert_eq!(metrics.udp_avg_scrape_processing_time_ns(), 3500); - } - } - mod udpv4_metrics { use super::*; #[test] fn it_should_return_zero_for_udp4_requests_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp4_requests(), 0); + assert_eq!(metrics.udp4_requests_received_total(), 0); } #[test] @@ -691,13 +576,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_requests(), 5); + assert_eq!(metrics.udp4_requests_received_total(), 5); } #[test] fn 
it_should_return_zero_for_udp4_connections_handled_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp4_connections_handled(), 0); + assert_eq!(metrics.udp4_connect_requests_accepted_total(), 0); } #[test] @@ -712,13 +597,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_connections_handled(), 3); + assert_eq!(metrics.udp4_connect_requests_accepted_total(), 3); } #[test] fn it_should_return_zero_for_udp4_announces_handled_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp4_announces_handled(), 0); + assert_eq!(metrics.udp4_announce_requests_accepted_total(), 0); } #[test] @@ -733,13 +618,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_announces_handled(), 7); + assert_eq!(metrics.udp4_announce_requests_accepted_total(), 7); } #[test] fn it_should_return_zero_for_udp4_scrapes_handled_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp4_scrapes_handled(), 0); + assert_eq!(metrics.udp4_scrape_requests_accepted_total(), 0); } #[test] @@ -754,13 +639,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_scrapes_handled(), 4); + assert_eq!(metrics.udp4_scrape_requests_accepted_total(), 4); } #[test] fn it_should_return_zero_for_udp4_responses_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp4_responses(), 0); + assert_eq!(metrics.udp4_responses_sent_total(), 0); } #[test] @@ -775,13 +660,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_responses(), 6); + assert_eq!(metrics.udp4_responses_sent_total(), 6); } #[test] fn it_should_return_zero_for_udp4_errors_handled_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp4_errors_handled(), 0); + assert_eq!(metrics.udp4_errors_total(), 0); } #[test] @@ -796,7 +681,7 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_errors_handled(), 2); + assert_eq!(metrics.udp4_errors_total(), 2); } } @@ -806,7 +691,7 @@ mod tests { #[test] fn 
it_should_return_zero_for_udp6_requests_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp6_requests(), 0); + assert_eq!(metrics.udp6_requests_received_total(), 0); } #[test] @@ -821,13 +706,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp6_requests(), 8); + assert_eq!(metrics.udp6_requests_received_total(), 8); } #[test] fn it_should_return_zero_for_udp6_connections_handled_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp6_connections_handled(), 0); + assert_eq!(metrics.udp6_connect_requests_accepted_total(), 0); } #[test] @@ -842,13 +727,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp6_connections_handled(), 4); + assert_eq!(metrics.udp6_connect_requests_accepted_total(), 4); } #[test] fn it_should_return_zero_for_udp6_announces_handled_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp6_announces_handled(), 0); + assert_eq!(metrics.udp6_announce_requests_accepted_total(), 0); } #[test] @@ -863,13 +748,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp6_announces_handled(), 9); + assert_eq!(metrics.udp6_announce_requests_accepted_total(), 9); } #[test] fn it_should_return_zero_for_udp6_scrapes_handled_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp6_scrapes_handled(), 0); + assert_eq!(metrics.udp6_scrape_requests_accepted_total(), 0); } #[test] @@ -884,13 +769,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp6_scrapes_handled(), 6); + assert_eq!(metrics.udp6_scrape_requests_accepted_total(), 6); } #[test] fn it_should_return_zero_for_udp6_responses_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp6_responses(), 0); + assert_eq!(metrics.udp6_responses_sent_total(), 0); } #[test] @@ -905,13 +790,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp6_responses(), 11); + assert_eq!(metrics.udp6_responses_sent_total(), 11); } #[test] fn it_should_return_zero_for_udp6_errors_handled_when_no_data() { let metrics = 
Metrics::default(); - assert_eq!(metrics.udp6_errors_handled(), 0); + assert_eq!(metrics.udp6_errors_total(), 0); } #[test] @@ -926,7 +811,7 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp6_errors_handled(), 3); + assert_eq!(metrics.udp6_errors_total(), 3); } } @@ -954,8 +839,8 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_requests(), 3); - assert_eq!(metrics.udp6_requests(), 7); + assert_eq!(metrics.udp4_requests_received_total(), 3); + assert_eq!(metrics.udp6_requests_received_total(), 7); } #[test] @@ -994,9 +879,9 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_connections_handled(), 2); - assert_eq!(metrics.udp4_announces_handled(), 5); - assert_eq!(metrics.udp4_scrapes_handled(), 1); + assert_eq!(metrics.udp4_connect_requests_accepted_total(), 2); + assert_eq!(metrics.udp4_announce_requests_accepted_total(), 5); + assert_eq!(metrics.udp4_scrape_requests_accepted_total(), 1); } #[test] @@ -1053,10 +938,10 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_connections_handled(), 3); - assert_eq!(metrics.udp6_connections_handled(), 2); - assert_eq!(metrics.udp4_announces_handled(), 4); - assert_eq!(metrics.udp6_announces_handled(), 6); + assert_eq!(metrics.udp4_connect_requests_accepted_total(), 3); + assert_eq!(metrics.udp6_connect_requests_accepted_total(), 2); + assert_eq!(metrics.udp4_announce_requests_accepted_total(), 4); + assert_eq!(metrics.udp6_announce_requests_accepted_total(), 6); } } @@ -1076,7 +961,7 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp_requests_aborted(), 1000); + assert_eq!(metrics.udp_requests_aborted_total(), 1000); } #[test] @@ -1106,25 +991,6 @@ mod tests { assert_eq!(metrics.udp_banned_ips_total(), 0); } - #[test] - fn it_should_handle_fractional_gauge_values_with_truncation() { - let mut metrics = Metrics::default(); - let now = CurrentClock::now(); - let labels = LabelSet::from([("request_kind", "connect")]); - - metrics - .set_gauge( - 
&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &labels, - 1234.567, - now, - ) - .unwrap(); - - // Should truncate to 1234 - assert_eq!(metrics.udp_avg_connect_processing_time_ns(), 1234); - } - #[test] fn it_should_overwrite_gauge_values_when_set_multiple_times() { let mut metrics = Metrics::default(); @@ -1155,7 +1021,7 @@ mod tests { let result = metrics.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &empty_labels, now); assert!(result.is_ok()); - assert_eq!(metrics.udp_requests_aborted(), 1); + assert_eq!(metrics.udp_requests_aborted_total(), 1); } #[test] @@ -1180,8 +1046,8 @@ mod tests { } // Should return labeled sums correctly - assert_eq!(metrics.udp4_requests(), 3); - assert_eq!(metrics.udp6_requests(), 5); + assert_eq!(metrics.udp4_requests_received_total(), 3); + assert_eq!(metrics.udp6_requests_received_total(), 5); } } diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 85e3bbe64..7a1c5fa4a 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -95,6 +95,7 @@ mod tests { use std::time::Duration; use torrust_tracker_clock::clock::Time; + use torrust_tracker_metrics::metric_collection::aggregate::sum::Sum; use torrust_tracker_metrics::metric_name; use super::*; @@ -155,8 +156,8 @@ mod tests { let stats = repo.get_stats().await; // Should be able to read metrics through the guard - assert_eq!(stats.udp_requests_aborted(), 0); - assert_eq!(stats.udp_requests_banned(), 0); + assert_eq!(stats.udp_requests_aborted_total(), 0); + assert_eq!(stats.udp_requests_banned_total(), 0); } #[tokio::test] @@ -174,7 +175,7 @@ mod tests { // Verify the counter was incremented let stats = repo.get_stats().await; - assert_eq!(stats.udp_requests_aborted(), 1); + assert_eq!(stats.udp_requests_aborted_total(), 1); } #[tokio::test] @@ -192,7 +193,7 @@ mod tests { 
// Verify the counter was incremented correctly let stats = repo.get_stats().await; - assert_eq!(stats.udp_requests_aborted(), 5); + assert_eq!(stats.udp_requests_aborted_total(), 5); } #[tokio::test] @@ -214,8 +215,8 @@ mod tests { // Verify both labeled metrics let stats = repo.get_stats().await; - assert_eq!(stats.udp4_requests(), 1); - assert_eq!(stats.udp6_requests(), 1); + assert_eq!(stats.udp4_requests_received_total(), 1); + assert_eq!(stats.udp6_requests_received_total(), 1); } #[tokio::test] @@ -286,8 +287,29 @@ mod tests { // Verify both labeled metrics let stats = repo.get_stats().await; - assert_eq!(stats.udp_avg_connect_processing_time_ns(), 1000); - assert_eq!(stats.udp_avg_announce_processing_time_ns(), 2000); + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_avg_connect_processing_time_ns = stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "connect")].into(), + ) + .unwrap_or_default() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_avg_announce_processing_time_ns = stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "announce")].into(), + ) + .unwrap_or_default() as u64; + + assert_eq!(udp_avg_connect_processing_time_ns, 1000); + assert_eq!(udp_avg_announce_processing_time_ns, 2000); } #[tokio::test] @@ -452,7 +474,7 @@ mod tests { // Verify all increments were properly recorded let stats = repo.get_stats().await; - assert_eq!(stats.udp_requests_aborted(), 50); // 10 tasks * 5 increments each + assert_eq!(stats.udp_requests_aborted_total(), 50); // 10 tasks * 5 increments each } #[tokio::test] @@ -511,9 +533,9 @@ mod tests { // Check final state let stats = repo.get_stats().await; - assert_eq!(stats.udp_requests_aborted(), 1); + assert_eq!(stats.udp_requests_aborted_total(), 1); 
assert_eq!(stats.udp_banned_ips_total(), 10); - assert_eq!(stats.udp_requests_banned(), 1); + assert_eq!(stats.udp_requests_banned_total(), 1); } #[tokio::test] diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index 2745f3407..da08bc177 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -273,7 +273,7 @@ mod receiving_an_announce_request { .stats_repository .get_stats() .await - .udp_requests_banned(); + .udp_requests_banned_total(); // This should return a timeout error match client.send(announce_request.into()).await { @@ -289,7 +289,7 @@ mod receiving_an_announce_request { .stats_repository .get_stats() .await - .udp_requests_banned(); + .udp_requests_banned_total(); let udp_banned_ips_total_after = ban_service.read().await.get_banned_ips_total(); // UDP counter for banned requests should be increased by 1 From dc8d4a9b9874b03a7724b17d0494e84430d95d45 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 16:23:07 +0100 Subject: [PATCH 199/247] test: [#1589] add race condition test for UDP performance metrics MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds a comprehensive unit test to validate thread safety when updating UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS metrics under concurrent load. 
The test: - Spawns 200 concurrent tasks (100 per server) simulating two UDP servers - Server 1: cycles through [1000, 2000, 3000, 4000, 5000] ns processing times - Server 2: cycles through [2000, 3000, 4000, 5000, 6000] ns processing times - Validates request counts, average calculations, and metric relationships - Uses tolerance-based assertions (±50ns) to account for moving average calculation variations in concurrent environments - Ensures thread safety and mathematical correctness of the metrics system This test helps ensure the UDP tracker server's metrics collection remains accurate and thread-safe under high-concurrency scenarios. --- .../src/statistics/repository.rs | 202 ++++++++++++++++++ 1 file changed, 202 insertions(+) diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 7a1c5fa4a..b80b8ba09 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -564,4 +564,206 @@ mod tests { // Should handle NaN values assert!(result.is_ok()); } + + #[tokio::test] + #[allow(clippy::too_many_lines)] + async fn it_should_handle_race_conditions_when_updating_udp_performance_metrics_in_parallel() { + // Number of concurrent requests per server + const REQUESTS_PER_SERVER: usize = 100; + + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Define labels for two different UDP servers + let server1_labels = LabelSet::from([ + ("request_kind", "connect"), + ("server_binding_address_ip_family", "inet"), + ("server_port", "6868"), + ]); + let server2_labels = LabelSet::from([ + ("request_kind", "connect"), + ("server_binding_address_ip_family", "inet"), + ("server_port", "6969"), + ]); + + let mut handles = vec![]; + + // Spawn tasks for server 1 + for i in 0..REQUESTS_PER_SERVER { + let repo_clone = repo.clone(); + let labels = server1_labels.clone(); + let handle = tokio::spawn(async move { + // 
Simulate varying processing times (1000ns to 5000ns) + let processing_time_ns = 1000 + (i % 5) * 1000; + let processing_time = Duration::from_nanos(processing_time_ns as u64); + + repo_clone + .recalculate_udp_avg_processing_time_ns(processing_time, &labels, now) + .await + }); + handles.push(handle); + } + + // Spawn tasks for server 2 + for i in 0..REQUESTS_PER_SERVER { + let repo_clone = repo.clone(); + let labels = server2_labels.clone(); + let handle = tokio::spawn(async move { + // Simulate different processing times (2000ns to 6000ns) + let processing_time_ns = 2000 + (i % 5) * 1000; + let processing_time = Duration::from_nanos(processing_time_ns as u64); + + repo_clone + .recalculate_udp_avg_processing_time_ns(processing_time, &labels, now) + .await + }); + handles.push(handle); + } + + // Collect all the results + let mut server1_results = Vec::new(); + let mut server2_results = Vec::new(); + + for (i, handle) in handles.into_iter().enumerate() { + let result = handle.await.unwrap(); + if i < REQUESTS_PER_SERVER { + server1_results.push(result); + } else { + server2_results.push(result); + } + } + + // Verify that all tasks completed successfully + assert_eq!(server1_results.len(), REQUESTS_PER_SERVER); + assert_eq!(server2_results.len(), REQUESTS_PER_SERVER); + + // Verify that all results are finite and positive + for result in &server1_results { + assert!(result.is_finite(), "Server 1 result should be finite: {result}"); + assert!(*result > 0.0, "Server 1 result should be positive: {result}"); + } + + for result in &server2_results { + assert!(result.is_finite(), "Server 2 result should be finite: {result}"); + assert!(*result > 0.0, "Server 2 result should be positive: {result}"); + } + + // Get final stats and verify metrics integrity + let stats = repo.get_stats().await; + + // Verify that the processed requests counters are correct for each server + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let server1_processed 
= stats + .metric_collection + .get_counter_value( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + &server1_labels, + ) + .unwrap() + .value(); + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let server2_processed = stats + .metric_collection + .get_counter_value( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + &server2_labels, + ) + .unwrap() + .value(); + + assert_eq!( + server1_processed, REQUESTS_PER_SERVER as u64, + "Server 1 should have processed {REQUESTS_PER_SERVER} requests", + ); + assert_eq!( + server2_processed, REQUESTS_PER_SERVER as u64, + "Server 2 should have processed {REQUESTS_PER_SERVER} requests", + ); + + // Verify that the final average processing times are reasonable + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let server1_final_avg = stats + .metric_collection + .get_gauge_value( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &server1_labels, + ) + .unwrap() + .value(); + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let server2_final_avg = stats + .metric_collection + .get_gauge_value( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &server2_labels, + ) + .unwrap() + .value(); + + // Server 1: 100 requests cycling through [1000, 2000, 3000, 4000, 5000] ns + // Expected average: (20×1000 + 20×2000 + 20×3000 + 20×4000 + 20×5000) / 100 = 3000 ns + // Note: Moving average with concurrent updates may have small deviations due to order dependency + assert!( + (server1_final_avg - 3000.0).abs() < 50.0, + "Server 1 final average should be close to 3000ns (±50ns), got {server1_final_avg}ns" + ); + + // Server 2: 100 requests cycling through [2000, 3000, 4000, 5000, 6000] ns + // Expected average: (20×2000 + 20×3000 + 20×4000 + 20×5000 + 20×6000) / 100 = 4000 ns + // Note: Moving average with concurrent updates may have small 
deviations due to order dependency + assert!( + (server2_final_avg - 4000.0).abs() < 50.0, + "Server 2 final average should be close to 4000ns (±50ns), got {server2_final_avg}ns" + ); + + // Verify that the two servers have different averages (they should since they have different processing time ranges) + assert!( + (server1_final_avg - server2_final_avg).abs() > 950.0, + "Server 1 and Server 2 should have different average processing times" + ); + + // Server 2 should generally have higher averages since its processing times are higher + assert!( + server2_final_avg > server1_final_avg, + "Server 2 average ({server2_final_avg}) should be higher than Server 1 average ({server1_final_avg})" + ); + + // Verify that the moving average calculation maintains consistency + // The last result for each server should match the final stored average + let server1_last_result = server1_results.last().copied().unwrap(); + let server2_last_result = server2_results.last().copied().unwrap(); + + // Note: Due to race conditions, the last result might not exactly match the final stored average + // but it should be in a reasonable range. We'll check that they're in the same ballpark. 
+ let server1_diff = (server1_last_result - server1_final_avg).abs(); + let server2_diff = (server2_last_result - server2_final_avg).abs(); + + assert!( + server1_diff <= 0.0, + "Server 1 last result ({server1_last_result}) should be equal to final average ({server1_final_avg}), diff: {server1_diff}", + ); + + assert!( + server2_diff <= 0.0, + "Server 2 last result ({server2_last_result}) should be equal to final average ({server2_final_avg}), diff: {server2_diff}", + ); + + // Verify that the metric collection contains the expected metrics for both servers + assert!(stats + .metric_collection + .contains_gauge(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL))); + + println!( + "Race condition test completed successfully:\n Server 1: {server1_processed} requests, final avg: {server1_final_avg}ns\n Server 2: {server2_processed} requests, final avg: {server2_final_avg}ns" + ); + } } From b423bf61ee13562ef642e3b4da01868246dfeec5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 17:50:15 +0100 Subject: [PATCH 200/247] refactor: [#1589] improve readability of UDP performance metrics race condition test Restructures the race condition test to follow clear Arrange-Act-Assert pattern and eliminates code duplication through helper function extraction. The test maintains identical functionality while being more maintainable, readable, and following DRY principles. All 200 concurrent tasks still validate thread safety and mathematical correctness of the metrics system. 
--- .../src/statistics/repository.rs | 346 +++++++++--------- 1 file changed, 176 insertions(+), 170 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index b80b8ba09..94a86e3ab 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -565,205 +565,211 @@ mod tests { assert!(result.is_ok()); } - #[tokio::test] - #[allow(clippy::too_many_lines)] - async fn it_should_handle_race_conditions_when_updating_udp_performance_metrics_in_parallel() { - // Number of concurrent requests per server - const REQUESTS_PER_SERVER: usize = 100; + mod race_conditions { - let repo = Repository::new(); - let now = CurrentClock::now(); + use core::f64; + use std::time::Duration; - // Define labels for two different UDP servers - let server1_labels = LabelSet::from([ - ("request_kind", "connect"), - ("server_binding_address_ip_family", "inet"), - ("server_port", "6868"), - ]); - let server2_labels = LabelSet::from([ - ("request_kind", "connect"), - ("server_binding_address_ip_family", "inet"), - ("server_port", "6969"), - ]); + use tokio::task::JoinHandle; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_metrics::metric_name; - let mut handles = vec![]; + use super::*; + use crate::CurrentClock; - // Spawn tasks for server 1 - for i in 0..REQUESTS_PER_SERVER { - let repo_clone = repo.clone(); - let labels = server1_labels.clone(); - let handle = tokio::spawn(async move { - // Simulate varying processing times (1000ns to 5000ns) - let processing_time_ns = 1000 + (i % 5) * 1000; - let processing_time = Duration::from_nanos(processing_time_ns as u64); + #[tokio::test] + async fn it_should_handle_race_conditions_when_updating_udp_performance_metrics_in_parallel() { + const REQUESTS_PER_SERVER: usize = 100; - repo_clone - .recalculate_udp_avg_processing_time_ns(processing_time, &labels, now) - .await - }); - 
handles.push(handle); - } + // ** Set up test data and environment ** - // Spawn tasks for server 2 - for i in 0..REQUESTS_PER_SERVER { - let repo_clone = repo.clone(); - let labels = server2_labels.clone(); - let handle = tokio::spawn(async move { - // Simulate different processing times (2000ns to 6000ns) - let processing_time_ns = 2000 + (i % 5) * 1000; - let processing_time = Duration::from_nanos(processing_time_ns as u64); + let repo = Repository::new(); + let now = CurrentClock::now(); - repo_clone - .recalculate_udp_avg_processing_time_ns(processing_time, &labels, now) - .await - }); - handles.push(handle); - } + let server1_labels = create_server_metric_labels("6868"); + let server2_labels = create_server_metric_labels("6969"); - // Collect all the results - let mut server1_results = Vec::new(); - let mut server2_results = Vec::new(); + // ** Execute concurrent metric updates ** - for (i, handle) in handles.into_iter().enumerate() { - let result = handle.await.unwrap(); - if i < REQUESTS_PER_SERVER { - server1_results.push(result); - } else { - server2_results.push(result); - } - } + // Spawn concurrent tasks for server 1 with processing times [1000, 2000, 3000, 4000, 5000] ns + let server1_handles = spawn_server_tasks(&repo, &server1_labels, 1000, now, REQUESTS_PER_SERVER); - // Verify that all tasks completed successfully - assert_eq!(server1_results.len(), REQUESTS_PER_SERVER); - assert_eq!(server2_results.len(), REQUESTS_PER_SERVER); + // Spawn concurrent tasks for server 2 with processing times [2000, 3000, 4000, 5000, 6000] ns + let server2_handles = spawn_server_tasks(&repo, &server2_labels, 2000, now, REQUESTS_PER_SERVER); - // Verify that all results are finite and positive - for result in &server1_results { - assert!(result.is_finite(), "Server 1 result should be finite: {result}"); - assert!(*result > 0.0, "Server 1 result should be positive: {result}"); - } + // Wait for both servers' results + let (server1_results, server2_results) = 
tokio::join!( + collect_concurrent_task_results(server1_handles), + collect_concurrent_task_results(server2_handles) + ); + + // ** Verify results and metrics ** + + // Verify correctness of concurrent operations + assert_server_results_are_valid(&server1_results, "Server 1", REQUESTS_PER_SERVER); + assert_server_results_are_valid(&server2_results, "Server 2", REQUESTS_PER_SERVER); + + let stats = repo.get_stats().await; - for result in &server2_results { - assert!(result.is_finite(), "Server 2 result should be finite: {result}"); - assert!(*result > 0.0, "Server 2 result should be positive: {result}"); + // Verify each server's metrics individually + let server1_avg = assert_server_metrics_are_correct(&stats, &server1_labels, "Server 1", REQUESTS_PER_SERVER, 3000.0); + let server2_avg = assert_server_metrics_are_correct(&stats, &server2_labels, "Server 2", REQUESTS_PER_SERVER, 4000.0); + + // Verify relationship between servers + assert_server_metrics_relationship(server1_avg, server2_avg); + + // Verify each server's result consistency individually + assert_server_result_matches_stored_average(&server1_results, &stats, &server1_labels, "Server 1"); + assert_server_result_matches_stored_average(&server2_results, &stats, &server2_labels, "Server 2"); + + // Verify metric collection integrity + assert_metric_collection_integrity(&stats); } - // Get final stats and verify metrics integrity - let stats = repo.get_stats().await; + // Test helper functions to hide implementation details - // Verify that the processed requests counters are correct for each server - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let server1_processed = stats - .metric_collection - .get_counter_value( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), - &server1_labels, - ) - .unwrap() - .value(); + fn create_server_metric_labels(port: &str) -> LabelSet { + LabelSet::from([ + ("request_kind", "connect"), + 
("server_binding_address_ip_family", "inet"), + ("server_port", port), + ]) + } - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let server2_processed = stats - .metric_collection - .get_counter_value( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), - &server2_labels, - ) - .unwrap() - .value(); + fn spawn_server_tasks( + repo: &Repository, + labels: &LabelSet, + base_processing_time_ns: usize, + now: DurationSinceUnixEpoch, + requests_per_server: usize, + ) -> Vec> { + let mut handles = vec![]; + + for i in 0..requests_per_server { + let repo_clone = repo.clone(); + let labels_clone = labels.clone(); + let handle = tokio::spawn(async move { + let processing_time_ns = base_processing_time_ns + (i % 5) * 1000; + let processing_time = Duration::from_nanos(processing_time_ns as u64); + repo_clone + .recalculate_udp_avg_processing_time_ns(processing_time, &labels_clone, now) + .await + }); + handles.push(handle); + } - assert_eq!( - server1_processed, REQUESTS_PER_SERVER as u64, - "Server 1 should have processed {REQUESTS_PER_SERVER} requests", - ); - assert_eq!( - server2_processed, REQUESTS_PER_SERVER as u64, - "Server 2 should have processed {REQUESTS_PER_SERVER} requests", - ); + handles + } - // Verify that the final average processing times are reasonable - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let server1_final_avg = stats - .metric_collection - .get_gauge_value( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &server1_labels, - ) - .unwrap() - .value(); + async fn collect_concurrent_task_results(handles: Vec>) -> Vec { + let mut server_results = Vec::new(); - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let server2_final_avg = stats - .metric_collection - .get_gauge_value( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &server2_labels, - ) - .unwrap() - .value(); + for 
handle in handles { + let result = handle.await.unwrap(); + server_results.push(result); + } - // Server 1: 100 requests cycling through [1000, 2000, 3000, 4000, 5000] ns - // Expected average: (20×1000 + 20×2000 + 20×3000 + 20×4000 + 20×5000) / 100 = 3000 ns - // Note: Moving average with concurrent updates may have small deviations due to order dependency - assert!( - (server1_final_avg - 3000.0).abs() < 50.0, - "Server 1 final average should be close to 3000ns (±50ns), got {server1_final_avg}ns" - ); + server_results + } - // Server 2: 100 requests cycling through [2000, 3000, 4000, 5000, 6000] ns - // Expected average: (20×2000 + 20×3000 + 20×4000 + 20×5000 + 20×6000) / 100 = 4000 ns - // Note: Moving average with concurrent updates may have small deviations due to order dependency - assert!( - (server2_final_avg - 4000.0).abs() < 50.0, - "Server 2 final average should be close to 4000ns (±50ns), got {server2_final_avg}ns" - ); + fn assert_server_results_are_valid(results: &[f64], server_name: &str, expected_count: usize) { + // Verify all tasks completed + assert_eq!( + results.len(), + expected_count, + "{server_name} should have {expected_count} results" + ); + + // Verify all results are valid numbers + for result in results { + assert!(result.is_finite(), "{server_name} result should be finite: {result}"); + assert!(*result > 0.0, "{server_name} result should be positive: {result}"); + } + } - // Verify that the two servers have different averages (they should since they have different processing time ranges) - assert!( - (server1_final_avg - server2_final_avg).abs() > 950.0, - "Server 1 and Server 2 should have different average processing times" - ); + fn assert_server_metrics_are_correct( + stats: &Metrics, + labels: &LabelSet, + server_name: &str, + expected_request_count: usize, + expected_avg_ns: f64, + ) -> f64 { + // Verify request count + let processed_requests = get_processed_requests_count(stats, labels); + assert_eq!( + processed_requests, 
expected_request_count as u64, + "{server_name} should have processed {expected_request_count} requests" + ); + + // Verify average processing time is within expected range + let avg_processing_time = get_average_processing_time(stats, labels); + assert!( + (avg_processing_time - expected_avg_ns).abs() < 50.0, + "{server_name} average should be ~{expected_avg_ns}ns (±50ns), got {avg_processing_time}ns" + ); + + avg_processing_time + } - // Server 2 should generally have higher averages since its processing times are higher - assert!( - server2_final_avg > server1_final_avg, - "Server 2 average ({server2_final_avg}) should be higher than Server 1 average ({server1_final_avg})" - ); + fn assert_server_metrics_relationship(server1_avg: f64, server2_avg: f64) { + const MIN_DIFFERENCE_NS: f64 = 950.0; - // Verify that the moving average calculation maintains consistency - // The last result for each server should match the final stored average - let server1_last_result = server1_results.last().copied().unwrap(); - let server2_last_result = server2_results.last().copied().unwrap(); + assert_averages_are_significantly_different(server1_avg, server2_avg, MIN_DIFFERENCE_NS); + assert_server_ordering_is_correct(server1_avg, server2_avg); + } - // Note: Due to race conditions, the last result might not exactly match the final stored average - // but it should be in a reasonable range. We'll check that they're in the same ballpark. 
- let server1_diff = (server1_last_result - server1_final_avg).abs(); - let server2_diff = (server2_last_result - server2_final_avg).abs(); + fn assert_averages_are_significantly_different(avg1: f64, avg2: f64, min_difference: f64) { + let difference = (avg1 - avg2).abs(); + assert!( + difference > min_difference, + "Server averages should differ by more than {min_difference}ns, but difference was {difference}ns" + ); + } - assert!( - server1_diff <= 0.0, - "Server 1 last result ({server1_last_result}) should be equal to final average ({server1_final_avg}), diff: {server1_diff}", + fn assert_server_ordering_is_correct(server1_avg: f64, server2_avg: f64) { + // Server 2 should have higher average since it has higher processing times [2000-6000] vs [1000-5000] + assert!( + server2_avg > server1_avg, + "Server 2 average ({server2_avg}ns) should be higher than Server 1 ({server1_avg}ns) due to higher processing time ranges" ); + } - assert!( - server2_diff <= 0.0, - "Server 2 last result ({server2_last_result}) should be equal to final average ({server2_final_avg}), diff: {server2_diff}", - ); + fn assert_server_result_matches_stored_average(results: &[f64], stats: &Metrics, labels: &LabelSet, server_name: &str) { + let final_avg = get_average_processing_time(stats, labels); + let last_result = results.last().copied().unwrap(); - // Verify that the metric collection contains the expected metrics for both servers - assert!(stats - .metric_collection - .contains_gauge(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS))); - assert!(stats - .metric_collection - .contains_counter(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL))); + assert!( + (last_result - final_avg).abs() <= f64::EPSILON, + "{server_name} last result ({last_result}) should match final average ({final_avg}) exactly" + ); + } - println!( - "Race condition test completed successfully:\n Server 1: {server1_processed} requests, final avg: {server1_final_avg}ns\n 
Server 2: {server2_processed} requests, final avg: {server2_final_avg}ns" - ); + fn assert_metric_collection_integrity(stats: &Metrics) { + assert!(stats + .metric_collection + .contains_gauge(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL))); + } + + fn get_processed_requests_count(stats: &Metrics, labels: &LabelSet) -> u64 { + stats + .metric_collection + .get_counter_value( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + labels, + ) + .unwrap() + .value() + } + + fn get_average_processing_time(stats: &Metrics, labels: &LabelSet) -> f64 { + stats + .metric_collection + .get_gauge_value(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), labels) + .unwrap() + .value() + } } } From 364c6077bd9a4eae3200c3665ac5b7fc472dba9c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Dec 2025 12:42:58 +0000 Subject: [PATCH 201/247] fix: clippy errors --- .../src/console/clients/checker/checks/udp.rs | 1 + packages/http-protocol/src/v1/query.rs | 2 +- packages/http-tracker-core/src/services/announce.rs | 8 -------- .../swarm-coordination-registry/src/swarm/coordinator.rs | 2 +- .../src/entry/peer_list.rs | 2 +- packages/tracker-core/src/torrent/services.rs | 2 +- packages/udp-tracker-server/src/event.rs | 2 +- packages/udp-tracker-server/tests/server/contract.rs | 2 +- src/console/ci/e2e/docker.rs | 2 +- src/console/ci/e2e/runner.rs | 2 +- 10 files changed, 9 insertions(+), 16 deletions(-) diff --git a/console/tracker-client/src/console/clients/checker/checks/udp.rs b/console/tracker-client/src/console/clients/checker/checks/udp.rs index 20394d55a..611afafc4 100644 --- a/console/tracker-client/src/console/clients/checker/checks/udp.rs +++ b/console/tracker-client/src/console/clients/checker/checks/udp.rs @@ -29,6 +29,7 @@ pub async fn run(udp_trackers: Vec, timeout: 
Duration) -> Vec for ErrorKind { }, UdpScrapeError::TrackerCoreWhitelistError { source } => Self::Whitelist(source.to_string()), }, - Error::Internal { location: _, message } => Self::InternalServer(message.to_string()), + Error::Internal { location: _, message } => Self::InternalServer(message.clone()), Error::AuthRequired { location } => Self::TrackerAuthentication(location.to_string()), } } diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index da08bc177..e9691c879 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -251,7 +251,7 @@ mod receiving_an_announce_request { let transaction_id = tx_id.0.to_string(); assert!( - logs_contains_a_line_with(&["ERROR", "UDP TRACKER", &transaction_id.to_string()]), + logs_contains_a_line_with(&["ERROR", "UDP TRACKER", &transaction_id]), "Expected logs to contain: ERROR ... UDP TRACKER ... transaction_id={transaction_id}" ); } diff --git a/src/console/ci/e2e/docker.rs b/src/console/ci/e2e/docker.rs index ce2b1aa99..89d258d2c 100644 --- a/src/console/ci/e2e/docker.rs +++ b/src/console/ci/e2e/docker.rs @@ -82,7 +82,7 @@ impl Docker { let mut port_args: Vec = vec![]; for port in &options.ports { port_args.push("--publish".to_string()); - port_args.push(port.to_string()); + port_args.push(port.clone()); } let args = [initial_args, env_var_args, port_args, [image.to_string()].to_vec()].concat(); diff --git a/src/console/ci/e2e/runner.rs b/src/console/ci/e2e/runner.rs index 624878c70..6275c144b 100644 --- a/src/console/ci/e2e/runner.rs +++ b/src/console/ci/e2e/runner.rs @@ -77,7 +77,7 @@ pub fn run() -> anyhow::Result<()> { // Besides, if we don't use port 0 we should get the port numbers from the tracker configuration. // We could not use docker, but the intention was to create E2E tests including containerization. 
let options = RunOptions { - env_vars: vec![("TORRUST_TRACKER_CONFIG_TOML".to_string(), tracker_config.to_string())], + env_vars: vec![("TORRUST_TRACKER_CONFIG_TOML".to_string(), tracker_config.clone())], ports: vec![ "6969:6969/udp".to_string(), "7070:7070/tcp".to_string(), From 11721dce92bbf4cc42d389da098a157a1a053922 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Dec 2025 12:43:42 +0000 Subject: [PATCH 202/247] chore(deps): udpate dependencies ``` $ cargo update Updating crates.io index Locking 264 packages to latest compatible versions Updating addr2line v0.24.2 -> v0.25.1 Updating aho-corasick v1.1.3 -> v1.1.4 Adding alloca v0.4.0 Removing android-tzdata v0.1.1 Updating anstream v0.6.19 -> v0.6.21 Updating anstyle v1.0.11 -> v1.0.13 Updating anstyle-query v1.1.3 -> v1.1.5 Updating anstyle-wincon v3.0.9 -> v3.0.11 Updating anyhow v1.0.98 -> v1.0.100 Adding astral-tokio-tar v0.5.6 Updating async-channel v2.3.1 -> v2.5.0 Updating async-compression v0.4.24 -> v0.4.34 Updating async-executor v1.13.2 -> v1.13.3 Updating async-io v2.4.1 -> v2.6.0 Updating async-lock v3.4.0 -> v3.4.1 Updating async-std v1.13.1 -> v1.13.2 Adding async-stream v0.3.6 Adding async-stream-impl v0.3.6 Updating async-trait v0.1.88 -> v0.1.89 Updating atomic v0.6.0 -> v0.6.1 Updating autocfg v1.4.0 -> v1.5.0 Updating axum v0.8.4 -> v0.8.7 Updating axum-core v0.5.2 -> v0.5.5 Updating axum-extra v0.10.1 -> v0.12.2 Updating axum-server v0.7.2 -> v0.7.3 Updating backtrace v0.3.75 -> v0.3.76 Updating bigdecimal v0.4.8 -> v0.4.9 Updating bindgen v0.72.0 -> v0.72.1 Removing bitflags v1.3.2 Removing bitflags v2.9.1 Adding bitflags v2.10.0 Updating blocking v1.6.1 -> v1.6.2 Updating bollard v0.18.1 -> v0.19.4 Adding bollard-buildkit-proto v0.7.0 Updating bollard-stubs v1.47.1-rc.27.3.1 -> v1.49.1-rc.28.4.0 Updating borsh v1.5.7 -> v1.6.0 Updating borsh-derive v1.5.7 -> v1.6.0 Updating brotli v8.0.1 -> v8.0.2 Updating bumpalo v3.18.1 -> v3.19.0 Updating bytemuck v1.23.1 -> v1.24.0 Updating 
bytes v1.10.1 -> v1.11.0 Updating camino v1.1.10 -> v1.1.12 (available: v1.2.1) Updating castaway v0.2.3 -> v0.2.4 Updating cc v1.2.26 -> v1.2.48 Updating cfg-if v1.0.1 -> v1.0.4 Updating chrono v0.4.41 -> v0.4.42 Updating clap v4.5.40 -> v4.5.53 Updating clap_builder v4.5.40 -> v4.5.53 Updating clap_derive v4.5.40 -> v4.5.49 Updating clap_lex v0.7.5 -> v0.7.6 Adding compression-codecs v0.4.33 Adding compression-core v0.4.31 Updating crc32fast v1.4.2 -> v1.5.0 Updating criterion v0.6.0 -> v0.8.0 Adding criterion-plot v0.8.0 Updating crunchy v0.2.3 -> v0.2.4 Updating crypto-common v0.1.6 -> v0.1.7 Adding darling v0.21.3 Adding darling_core v0.21.3 Adding darling_macro v0.21.3 Updating deranged v0.4.0 -> v0.5.5 Adding dyn-clone v1.0.20 Updating errno v0.3.12 -> v0.3.14 Updating etcetera v0.10.0 -> v0.11.0 Updating event-listener v5.4.0 -> v5.4.1 Adding ferroid v0.8.7 Updating filetime v0.2.25 -> v0.2.26 Adding find-msvc-tools v0.1.5 Updating flate2 v1.1.2 -> v1.1.5 Updating form_urlencoded v1.2.1 -> v1.2.2 Updating frunk v0.4.3 -> v0.4.4 Updating frunk_core v0.4.3 -> v0.4.4 Updating frunk_derives v0.4.3 -> v0.4.4 Updating frunk_proc_macro_helpers v0.1.3 -> v0.1.4 Updating frunk_proc_macros v0.1.3 -> v0.1.4 Updating fs-err v3.1.1 -> v3.2.0 Updating futures-lite v2.6.0 -> v2.6.1 Updating getrandom v0.3.3 -> v0.3.4 Updating gimli v0.31.1 -> v0.32.3 Updating glob v0.3.2 -> v0.3.3 Updating h2 v0.4.10 -> v0.4.12 Updating half v2.6.0 -> v2.7.1 Removing hashbrown v0.15.4 Adding hashbrown v0.15.5 Adding hashbrown v0.16.1 Updating hermit-abi v0.5.1 -> v0.5.2 Updating hex-literal v1.0.0 -> v1.1.0 Updating home v0.5.11 -> v0.5.12 Updating http v1.3.1 -> v1.4.0 Updating hyper v1.6.0 -> v1.8.1 Adding hyper-timeout v0.5.2 Updating hyper-util v0.1.14 -> v0.1.18 Updating iana-time-zone v0.1.63 -> v0.1.64 Updating icu_collections v2.0.0 -> v2.1.1 Updating icu_locale_core v2.0.0 -> v2.1.1 Updating icu_normalizer v2.0.0 -> v2.1.1 Updating icu_normalizer_data v2.0.0 -> v2.1.1 Updating 
icu_properties v2.0.1 -> v2.1.1 Updating icu_properties_data v2.0.1 -> v2.1.1 Updating icu_provider v2.0.0 -> v2.1.1 Updating idna v1.0.3 -> v1.1.0 Updating indexmap v2.9.0 -> v2.12.1 Updating iri-string v0.7.8 -> v0.7.9 Updating is-terminal v0.4.16 -> v0.4.17 Updating is_terminal_polyfill v1.70.1 -> v1.70.2 Adding itertools v0.14.0 Updating jobserver v0.1.33 -> v0.1.34 Updating js-sys v0.3.77 -> v0.3.83 Updating libc v0.2.172 -> v0.2.177 Updating libloading v0.8.8 -> v0.8.9 Updating libredox v0.1.3 -> v0.1.10 Updating libsqlite3-sys v0.34.0 -> v0.35.0 Updating libz-sys v1.1.22 -> v1.1.23 Updating linux-raw-sys v0.9.4 -> v0.11.0 Updating litemap v0.8.0 -> v0.8.1 Updating lock_api v0.4.13 -> v0.4.14 Updating log v0.4.27 -> v0.4.28 Updating memchr v2.7.4 -> v2.7.6 Updating mio v1.0.4 -> v1.1.0 Updating mockall v0.13.1 -> v0.14.0 Updating mockall_derive v0.13.1 -> v0.14.0 Updating nu-ansi-term v0.46.0 -> v0.50.3 Adding num v0.4.3 Adding num-complex v0.4.6 Adding num-iter v0.1.45 Adding num-rational v0.4.2 Updating object v0.36.7 -> v0.37.3 Updating once_cell_polyfill v1.70.1 -> v1.70.2 Updating openssl v0.10.73 -> v0.10.75 Updating openssl-sys v0.9.109 -> v0.9.111 Removing overload v0.1.1 Updating owo-colors v4.2.1 -> v4.2.3 Adding page_size v0.6.0 Updating parking_lot v0.12.4 -> v0.12.5 Updating parking_lot_core v0.9.11 -> v0.9.12 Updating pem v3.0.5 -> v3.0.6 Updating percent-encoding v2.3.1 -> v2.3.2 Adding pin-project v1.1.10 Adding pin-project-internal v1.1.10 Updating polling v3.8.0 -> v3.11.0 Updating potential_utf v0.1.2 -> v0.1.4 Updating proc-macro-crate v3.3.0 -> v3.4.0 Updating proc-macro2 v1.0.95 -> v1.0.103 Adding prost v0.14.2 Adding prost-derive v0.14.2 Adding prost-types v0.14.2 Updating quote v1.0.40 -> v1.0.42 Updating r-efi v5.2.0 -> v5.3.0 Updating r2d2_sqlite v0.29.0 -> v0.31.0 Updating rand v0.9.1 -> v0.9.2 Updating rayon v1.10.0 -> v1.11.0 Updating rayon-core v1.12.1 -> v1.13.0 Removing redox_syscall v0.3.5 Removing redox_syscall v0.5.12 Adding 
redox_syscall v0.5.18 Adding ref-cast v1.0.25 Adding ref-cast-impl v1.0.25 Updating regex v1.11.1 -> v1.12.2 Updating regex-automata v0.4.9 -> v0.4.13 Updating regex-syntax v0.8.5 -> v0.8.8 Updating reqwest v0.12.20 -> v0.12.24 Adding rstest v0.26.1 Adding rstest_macros v0.26.1 Updating rusqlite v0.36.0 -> v0.37.0 Updating rust_decimal v1.37.1 -> v1.39.0 Updating rustc-demangle v0.1.25 -> v0.1.26 Updating rustix v1.0.7 -> v1.1.2 Updating rustls v0.23.27 -> v0.23.35 Updating rustls-native-certs v0.8.1 -> v0.8.2 Updating rustls-pki-types v1.12.0 -> v1.13.1 Updating rustls-webpki v0.103.3 -> v0.103.8 Updating rustversion v1.0.21 -> v1.0.22 Updating schannel v0.1.27 -> v0.1.28 Adding schemars v0.9.0 Adding schemars v1.1.0 Updating security-framework v3.2.0 -> v3.5.1 Updating security-framework-sys v2.14.0 -> v2.15.0 Updating semver v1.0.26 -> v1.0.27 Updating serde v1.0.219 -> v1.0.228 Updating serde_bytes v0.11.17 -> v0.11.19 Adding serde_core v1.0.228 Updating serde_derive v1.0.219 -> v1.0.228 Updating serde_html_form v0.2.7 -> v0.2.8 Updating serde_json v1.0.140 -> v1.0.145 Updating serde_path_to_error v0.1.17 -> v0.1.20 Adding serde_spanned v1.0.3 Updating serde_with v3.12.0 -> v3.16.1 Updating serde_with_macros v3.12.0 -> v3.16.1 Updating signal-hook-registry v1.4.5 -> v1.4.7 Adding simd-adler32 v0.3.7 Updating slab v0.4.9 -> v0.4.11 Adding socket2 v0.6.1 Updating stable_deref_trait v1.2.0 -> v1.2.1 Updating syn v2.0.102 -> v2.0.111 Updating tempfile v3.20.0 -> v3.23.0 Updating terminal_size v0.4.2 -> v0.4.3 Updating testcontainers v0.24.0 -> v0.26.0 Updating thiserror v2.0.12 -> v2.0.17 Updating thiserror-impl v2.0.12 -> v2.0.17 Updating thread_local v1.1.8 -> v1.1.9 Updating time v0.3.41 -> v0.3.44 Updating time-core v0.1.4 -> v0.1.6 Updating time-macros v0.2.22 -> v0.2.24 Updating tinystr v0.8.1 -> v0.8.2 Updating tinyvec v1.9.0 -> v1.10.0 Updating tokio v1.45.1 -> v1.48.0 Updating tokio-macros v2.5.0 -> v2.6.0 Updating tokio-rustls v0.26.2 -> v0.26.4 Removing 
tokio-tar v0.3.1 Updating tokio-util v0.7.15 -> v0.7.17 Adding toml v0.9.8 Adding toml_datetime v0.7.3 Adding toml_edit v0.23.7 Adding toml_parser v1.0.4 Adding toml_writer v1.0.4 Adding tonic v0.14.2 Adding tonic-prost v0.14.2 Updating tower-http v0.6.6 -> v0.6.7 Updating tracing v0.1.41 -> v0.1.43 Updating tracing-attributes v0.1.29 -> v0.1.31 Updating tracing-core v0.1.34 -> v0.1.35 Updating tracing-subscriber v0.3.19 -> v0.3.22 Updating typenum v1.18.0 -> v1.19.0 Updating unicode-ident v1.0.18 -> v1.0.22 Updating unicode-width v0.2.1 -> v0.2.2 Adding ureq v3.1.4 Adding ureq-proto v0.5.3 Updating url v2.5.4 -> v2.5.7 Adding utf-8 v0.7.6 Updating uuid v1.17.0 -> v1.18.1 Updating value-bag v1.11.1 -> v1.12.0 Removing wasi v0.14.2+wasi-0.2.4 Adding wasip2 v1.0.1+wasi-0.2.4 Updating wasm-bindgen v0.2.100 -> v0.2.106 Removing wasm-bindgen-backend v0.2.100 Updating wasm-bindgen-futures v0.4.50 -> v0.4.56 Updating wasm-bindgen-macro v0.2.100 -> v0.2.106 Updating wasm-bindgen-macro-support v0.2.100 -> v0.2.106 Updating wasm-bindgen-shared v0.2.100 -> v0.2.106 Updating web-sys v0.3.77 -> v0.3.83 Adding web-time v1.1.0 Adding webpki-roots v1.0.4 Updating winapi-util v0.1.9 -> v0.1.11 Updating windows-core v0.61.2 -> v0.62.2 Updating windows-implement v0.60.0 -> v0.60.2 Updating windows-interface v0.59.1 -> v0.59.3 Updating windows-link v0.1.1 -> v0.2.1 Updating windows-registry v0.5.2 -> v0.6.1 Updating windows-result v0.3.4 -> v0.4.1 Updating windows-strings v0.4.2 -> v0.5.1 Adding windows-sys v0.60.2 Adding windows-sys v0.61.2 Updating windows-targets v0.53.0 -> v0.53.5 Updating windows_aarch64_gnullvm v0.53.0 -> v0.53.1 Updating windows_aarch64_msvc v0.53.0 -> v0.53.1 Updating windows_i686_gnu v0.53.0 -> v0.53.1 Updating windows_i686_gnullvm v0.53.0 -> v0.53.1 Updating windows_i686_msvc v0.53.0 -> v0.53.1 Updating windows_x86_64_gnu v0.53.0 -> v0.53.1 Updating windows_x86_64_gnullvm v0.53.0 -> v0.53.1 Updating windows_x86_64_msvc v0.53.0 -> v0.53.1 Updating winnow 
v0.7.11 -> v0.7.14 Adding wit-bindgen v0.46.0 Removing wit-bindgen-rt v0.39.0 Updating writeable v0.6.1 -> v0.6.2 Updating xattr v1.5.0 -> v1.6.1 Updating yoke v0.8.0 -> v0.8.1 Updating yoke-derive v0.8.0 -> v0.8.1 Updating zerocopy v0.8.25 -> v0.8.31 Updating zerocopy-derive v0.8.25 -> v0.8.31 Updating zeroize v1.8.1 -> v1.8.2 Updating zerotrie v0.2.2 -> v0.2.3 Updating zerovec v0.11.2 -> v0.11.5 Updating zerovec-derive v0.11.1 -> v0.11.2 Updating zstd-sys v2.0.15+zstd.1.5.7 -> v2.0.16+zstd.1.5.7 note: pass `--verbose` to see 6 unchanged dependencies behind latest ``` --- Cargo.lock | 1870 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 1187 insertions(+), 683 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b523c8b60..62d10c72f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.24.2" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b" dependencies = [ "gimli", ] @@ -30,9 +30,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] @@ -53,16 +53,19 @@ dependencies = [ ] [[package]] -name = "allocator-api2" -version = "0.2.21" +name = "alloca" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +checksum = "e5a7d05ea6aea7e9e64d25b9156ba2fee3fdd659e34e41063cd2fc7cd020d7f4" +dependencies = [ + "cc", +] [[package]] -name = "android-tzdata" -version = "0.1.1" +name = "allocator-api2" +version = 
"0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "android_system_properties" @@ -81,9 +84,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.19" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" +checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" dependencies = [ "anstyle", "anstyle-parse", @@ -96,9 +99,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" +checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" [[package]] name = "anstyle-parse" @@ -111,29 +114,29 @@ dependencies = [ [[package]] name = "anstyle-query" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.9" +version = "3.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "anyhow" -version = "1.0.98" +version = "1.0.100" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "approx" @@ -182,6 +185,22 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +[[package]] +name = "astral-tokio-tar" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec179a06c1769b1e42e1e2cbe74c7dcdb3d6383c838454d063eaac5bbb7ebbe5" +dependencies = [ + "filetime", + "futures-core", + "libc", + "portable-atomic", + "rustc-hash", + "tokio", + "tokio-stream", + "xattr", +] + [[package]] name = "async-attributes" version = "1.1.2" @@ -205,9 +224,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" dependencies = [ "concurrent-queue", "event-listener-strategy", @@ -217,25 +236,22 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.24" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d615619615a650c571269c00dca41db04b9210037fa76ed8239f70404ab56985" +checksum = "0e86f6d3dc9dc4352edeea6b8e499e13e3f5dc3b964d7ca5fd411415a3498473" dependencies = [ - "brotli", - "flate2", + "compression-codecs", + "compression-core", "futures-core", - "memchr", "pin-project-lite", "tokio", - "zstd", - "zstd-safe", ] [[package]] name = "async-executor" -version = "1.13.2" +version = "1.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb812ffb58524bdd10860d7d974e2f01cc0950c2438a74ee5ec2e2280c6c4ffa" +checksum = 
"497c00e0fd83a72a79a39fcbd8e3e2f055d6f6c7e025f3b3d91f4f8e76527fb8" dependencies = [ "async-task", "concurrent-queue", @@ -251,7 +267,7 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel 2.3.1", + "async-channel 2.5.0", "async-executor", "async-io", "async-lock", @@ -263,11 +279,11 @@ dependencies = [ [[package]] name = "async-io" -version = "2.4.1" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1237c0ae75a0f3765f58910ff9cdd0a12eeb39ab2f4c7de23262f337f0aacbb3" +checksum = "456b8a8feb6f42d237746d4b3e9a178494627745c3c56c6ea55d92ba50d026fc" dependencies = [ - "async-lock", + "autocfg", "cfg-if", "concurrent-queue", "futures-io", @@ -276,26 +292,25 @@ dependencies = [ "polling", "rustix", "slab", - "tracing", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "async-lock" -version = "3.4.0" +version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +checksum = "5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.4.1", "event-listener-strategy", "pin-project-lite", ] [[package]] name = "async-std" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730294c1c08c2e0f85759590518f6333f0d5a0a766a27d519c1b244c3dfd8a24" +checksum = "2c8e079a4ab67ae52b7403632e4618815d6db36d2a010cfe41b02c1b1578f93b" dependencies = [ "async-attributes", "async-channel 1.9.0", @@ -318,6 +333,28 @@ dependencies = [ "wasm-bindgen-futures", ] +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" 
+dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "async-task" version = "4.7.1" @@ -326,20 +363,20 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.88" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "atomic" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" +checksum = "a89cbf775b137e9b968e67227ef7f775587cde3fd31b0d8599dbd0f598a48340" dependencies = [ "bytemuck", ] @@ -352,15 +389,15 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "axum" -version = "0.8.4" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5" +checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425" dependencies = [ "axum-core", "axum-macros", @@ -378,8 +415,7 @@ dependencies = [ "mime", "percent-encoding", 
"pin-project-lite", - "rustversion", - "serde", + "serde_core", "serde_json", "serde_path_to_error", "serde_urlencoded", @@ -404,9 +440,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.5.2" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68464cd0412f486726fb3373129ef5d2993f90c34bc2bc1c1e9943b2f4fc7ca6" +checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22" dependencies = [ "bytes", "futures-core", @@ -415,7 +451,6 @@ dependencies = [ "http-body-util", "mime", "pin-project-lite", - "rustversion", "sync_wrapper", "tower-layer", "tower-service", @@ -424,27 +459,27 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.10.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45bf463831f5131b7d3c756525b305d40f1185b688565648a92e1392ca35713d" +checksum = "dbfe9f610fe4e99cf0cfcd03ccf8c63c28c616fe714d80475ef731f3b13dd21b" dependencies = [ "axum", "axum-core", "bytes", "form_urlencoded", + "futures-core", "futures-util", "http", "http-body", "http-body-util", "mime", "pin-project-lite", - "rustversion", - "serde", + "serde_core", "serde_html_form", "serde_path_to_error", - "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -455,14 +490,14 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "axum-server" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "495c05f60d6df0093e8fb6e74aa5846a0ad06abaf96d76166283720bf740f8ab" +checksum = "c1ab4a3ec9ea8a657c72d99a03a824af695bd0fb5ec639ccbd9cd3543b41a5f9" dependencies = [ "arc-swap", "bytes", @@ -482,9 +517,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.75" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" +checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" dependencies = [ "addr2line", "cfg-if", @@ -492,7 +527,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -518,9 +553,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bigdecimal" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a22f228ab7a1b23027ccc6c350b72868017af7ea8356fbdf19f8d991c690013" +checksum = "560f42649de9fa436b73517378a147ec21f6c997a546581df4b4b31677828934" dependencies = [ "autocfg", "libm", @@ -537,11 +572,11 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version = "0.72.0" +version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f72209734318d0b619a5e0f5129918b848c416e122a3c4ce054e03cb87b726f" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.9.1", + "bitflags", "cexpr", "clang-sys", "itertools 0.13.0", @@ -550,7 +585,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -561,15 +596,9 @@ checksum = "02b4ff8b16e6076c3e14220b39fbc1fabb6737522281a388998046859400895f" [[package]] name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.9.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" [[package]] name = "bittorrent-http-tracker-core" @@ 
-585,7 +614,7 @@ dependencies = [ "mockall", "serde", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tokio-util", "torrust-tracker-clock", @@ -610,7 +639,7 @@ dependencies = [ "percent-encoding", "serde", "serde_bencode", - "thiserror 2.0.12", + "thiserror 2.0.17", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-contrib-bencode", @@ -646,7 +675,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_repr", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "torrust-tracker-configuration", "torrust-tracker-located-error", @@ -668,11 +697,11 @@ dependencies = [ "r2d2", "r2d2_mysql", "r2d2_sqlite", - "rand 0.9.1", + "rand 0.9.2", "serde", "serde_json", "testcontainers", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tokio-util", "torrust-rest-tracker-api-client", @@ -703,9 +732,9 @@ dependencies = [ "futures", "lazy_static", "mockall", - "rand 0.9.1", + "rand 0.9.2", "serde", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tokio-util", "torrust-tracker-clock", @@ -751,11 +780,11 @@ dependencies = [ [[package]] name = "blocking" -version = "1.6.1" +version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" +checksum = "e83f8d02be6967315521be875afa792a316e28d57b5a2d401897e2a7921b7f21" dependencies = [ - "async-channel 2.3.1", + "async-channel 2.5.0", "async-task", "futures-io", "futures-lite", @@ -783,13 +812,17 @@ dependencies = [ [[package]] name = "bollard" -version = "0.18.1" +version = "0.19.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ccca1260af6a459d75994ad5acc1651bcabcbdbc41467cc9786519ab854c30" +checksum = "87a52479c9237eb04047ddb94788c41ca0d26eaff8b697ecfbb4c32f7fdc3b1b" dependencies = [ + "async-stream", "base64 0.22.1", + "bitflags", + "bollard-buildkit-proto", "bollard-stubs", "bytes", + "chrono", "futures-core", "futures-util", "hex", @@ -802,7 +835,9 
@@ dependencies = [ "hyper-util", "hyperlocal", "log", + "num", "pin-project-lite", + "rand 0.9.2", "rustls", "rustls-native-certs", "rustls-pemfile", @@ -812,30 +847,51 @@ dependencies = [ "serde_json", "serde_repr", "serde_urlencoded", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", + "tokio-stream", "tokio-util", + "tonic", "tower-service", "url", "winapi", ] +[[package]] +name = "bollard-buildkit-proto" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85a885520bf6249ab931a764ffdb87b0ceef48e6e7d807cfdb21b751e086e1ad" +dependencies = [ + "prost", + "prost-types", + "tonic", + "tonic-prost", + "ureq", +] + [[package]] name = "bollard-stubs" -version = "1.47.1-rc.27.3.1" +version = "1.49.1-rc.28.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f179cfbddb6e77a5472703d4b30436bff32929c0aa8a9008ecf23d1d3cdd0da" +checksum = "5731fe885755e92beff1950774068e0cae67ea6ec7587381536fca84f1779623" dependencies = [ + "base64 0.22.1", + "bollard-buildkit-proto", + "bytes", + "chrono", + "prost", "serde", + "serde_json", "serde_repr", "serde_with", ] [[package]] name = "borsh" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +checksum = "d1da5ab77c1437701eeff7c88d968729e7766172279eab0676857b3d63af7a6f" dependencies = [ "borsh-derive", "cfg_aliases", @@ -843,22 +899,22 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +checksum = "0686c856aa6aac0c4498f936d7d6a02df690f614c03e4d906d1018062b5c5e2c" dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "brotli" -version = "8.0.1" +version = "8.0.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9991eea70ea4f293524138648e41ee89b0b2b12ddef3b255effa43c8056e0e0d" +checksum = "4bd8b9603c7aa97359dbd97ecf258968c95f3adddd6db2f7e7a5bef101c84560" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -892,9 +948,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.18.1" +version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db76d6187cd04dff33004d8e6c9cc4e05cd330500379d2394209271b4aeee" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "bytecheck" @@ -920,9 +976,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.23.1" +version = "1.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" [[package]] name = "byteorder" @@ -932,15 +988,15 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" [[package]] name = "camino" -version = "1.1.10" +version = "1.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" +checksum = "dd0b03af37dad7a14518b7691d81acb0f8222604ad3d1b02f6b4bed5188c0cd5" dependencies = [ "serde", ] @@ -953,19 +1009,20 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "castaway" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5" +checksum = "dec551ab6e7578819132c713a93c022a05d60159dc86e7a7050223577484c55a" dependencies = [ "rustversion", ] [[package]] name = "cc" -version = "1.2.26" +version = "1.2.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956a5e21988b87f372569b66183b78babf23ebc2e744b733e4350a752c4dafac" +checksum = "c481bdbf0ed3b892f6f806287d72acd515b352a4ec27a208489b8c1bc839633a" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -982,9 +1039,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "cfg_aliases" @@ -994,11 +1051,10 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ - "android-tzdata", "iana-time-zone", "num-traits", "serde", @@ -1055,9 +1111,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.40" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f" +checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" dependencies = [ "clap_builder", "clap_derive", @@ -1065,9 +1121,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.40" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" +checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" dependencies = [ "anstream", "anstyle", @@ -1077,21 +1133,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.40" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c7947ae4cc3d851207c1adb5b5e260ff0cca11446b1d6d1423788e442257ce" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "clap_lex" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" +checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" [[package]] name = "cmake" @@ -1121,6 +1177,26 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "compression-codecs" +version = "0.4.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "302266479cb963552d11bd042013a58ef1adc56768016c8b82b4199488f2d4ad" +dependencies = [ + "brotli", + "compression-core", + "flate2", + "memchr", + "zstd", + "zstd-safe", +] + +[[package]] +name = "compression-core" +version = "0.4.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75984efb6ed102a0d42db99afb6c1948f0380d1d91808d5529916e6c08b49d8d" + [[package]] name = "concurrent-queue" version = "2.5.0" @@ -1167,9 +1243,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] @@ -1184,7 +1260,7 @@ dependencies = [ "cast", "ciborium", "clap", - 
"criterion-plot", + "criterion-plot 0.5.0", "futures", "is-terminal", "itertools 0.10.5", @@ -1204,18 +1280,20 @@ dependencies = [ [[package]] name = "criterion" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bf7af66b0989381bd0be551bd7cc91912a655a58c6918420c9527b1fd8b4679" +checksum = "a0dfe5e9e71bdcf4e4954f7d14da74d1cdb92a3a07686452d1509652684b1aab" dependencies = [ + "alloca", "anes", "cast", "ciborium", "clap", - "criterion-plot", + "criterion-plot 0.8.0", "itertools 0.13.0", "num-traits", "oorandom", + "page_size", "plotters", "rayon", "regex", @@ -1236,6 +1314,16 @@ dependencies = [ "itertools 0.10.5", ] +[[package]] +name = "criterion-plot" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de36c2bee19fba779808f92bf5d9b0fa5a40095c277aba10c458a12b35d21d6" +dependencies = [ + "cast", + "itertools 0.13.0", +] + [[package]] name = "crossbeam" version = "0.8.4" @@ -1304,15 +1392,15 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "crypto-common" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ "generic-array", "typenum", @@ -1324,8 +1412,18 @@ version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.20.11", + 
"darling_macro 0.20.11", +] + +[[package]] +name = "darling" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core 0.21.3", + "darling_macro 0.21.3", ] [[package]] @@ -1339,7 +1437,21 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.102", + "syn 2.0.111", +] + +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.111", ] [[package]] @@ -1348,9 +1460,20 @@ version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ - "darling_core", + "darling_core 0.20.11", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core 0.21.3", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -1369,12 +1492,12 @@ dependencies = [ [[package]] name = "deranged" -version = "0.4.0" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" dependencies = [ "powerfmt", - "serde", + "serde_core", ] [[package]] @@ -1394,7 +1517,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", "unicode-xid", ] @@ -1406,7 +1529,7 @@ checksum = 
"ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -1433,7 +1556,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -1453,6 +1576,12 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + [[package]] name = "either" version = "1.15.0" @@ -1486,23 +1615,22 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.12" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "etcetera" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c7b13d0780cb82722fd59f6f57f925e143427e4a75313a6c77243bf5326ae6" +checksum = "de48cc4d1c1d97a20fd819def54b890cadde72ed3ad0c614822a0a433361be96" dependencies = [ "cfg-if", - "home", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -1513,9 +1641,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.4.0" +version = "5.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +checksum = 
"e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" dependencies = [ "concurrent-queue", "parking", @@ -1528,7 +1656,7 @@ version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.4.1", "pin-project-lite", ] @@ -1550,6 +1678,17 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +[[package]] +name = "ferroid" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0e9414a6ae93ef993ce40a1e02944f13d4508e2bf6f1ced1580ce6910f08253" +dependencies = [ + "portable-atomic", + "rand 0.9.2", + "web-time", +] + [[package]] name = "figment" version = "0.10.19" @@ -1561,28 +1700,34 @@ dependencies = [ "pear", "serde", "tempfile", - "toml", + "toml 0.8.23", "uncased", "version_check", ] [[package]] name = "filetime" -version = "0.2.25" +version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" +checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" dependencies = [ "cfg-if", "libc", "libredox", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" + [[package]] name = "flate2" -version = "1.1.2" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" +checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" dependencies = [ "crc32fast", "libz-sys", @@ -1618,9 +1763,9 @@ checksum = 
"00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ "percent-encoding", ] @@ -1653,9 +1798,9 @@ checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619" [[package]] name = "frunk" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "874b6a17738fc273ec753618bac60ddaeac48cb1d7684c3e7bd472e57a28b817" +checksum = "28aef0f9aa070bce60767c12ba9cb41efeaf1a2bc6427f87b7d83f11239a16d7" dependencies = [ "frunk_core", "frunk_derives", @@ -1665,53 +1810,53 @@ dependencies = [ [[package]] name = "frunk_core" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3529a07095650187788833d585c219761114005d5976185760cf794d265b6a5c" +checksum = "476eeaa382e3462b84da5d6ba3da97b5786823c2d0d3a0d04ef088d073da225c" dependencies = [ "serde", ] [[package]] name = "frunk_derives" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" +checksum = "a0b4095fc99e1d858e5b8c7125d2638372ec85aa0fe6c807105cf10b0265ca6c" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "frunk_proc_macro_helpers" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05a956ef36c377977e512e227dcad20f68c2786ac7a54dacece3746046fea5ce" +checksum = "1952b802269f2db12ab7c0bd328d0ae8feaabf19f352a7b0af7bb0c5693abfce" dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] 
name = "frunk_proc_macros" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67e86c2c9183662713fea27ea527aad20fb15fee635a71081ff91bf93df4dc51" +checksum = "3462f590fa236005bd7ca4847f81438bd6fe0febd4d04e11968d4c2e96437e78" dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "fs-err" -version = "3.1.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d7be93788013f265201256d58f04936a8079ad5dc898743aa20525f503b683" +checksum = "62d91fd049c123429b018c47887d3f75a265540dd3c30ba9cb7bae9197edb03a" dependencies = [ "autocfg", "tokio", @@ -1773,9 +1918,9 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" dependencies = [ "fastrand", "futures-core", @@ -1792,7 +1937,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -1849,32 +1994,32 @@ checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "libc", - "wasi 0.11.1+wasi-snapshot-preview1", + "wasi", ] [[package]] name = "getrandom" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", "libc", "r-efi", - "wasi 0.14.2+wasi-0.2.4", + "wasip2", ] [[package]] name = "gimli" -version = "0.31.1" +version = "0.32.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" [[package]] name = "glob" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "gloo-timers" @@ -1890,9 +2035,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" dependencies = [ "atomic-waker", "bytes", @@ -1900,7 +2045,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.9.0", + "indexmap 2.12.1", "slab", "tokio", "tokio-util", @@ -1909,12 +2054,13 @@ dependencies = [ [[package]] name = "half" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", + "zerocopy 0.8.31", ] [[package]] @@ -1934,22 +2080,28 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hashbrown" -version = "0.15.4" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "allocator-api2", "equivalent", "foldhash", ] +[[package]] +name = "hashbrown" +version = "0.16.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + [[package]] name = "hashlink" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.15.4", + "hashbrown 0.15.5", ] [[package]] @@ -1960,9 +2112,9 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f154ce46856750ed433c8649605bf7ed2de3bc35fd9d2a9f30cddd873c80cb08" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" @@ -1972,27 +2124,26 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcaaec4551594c969335c98c903c1397853d4198408ea609190f420500f6be71" +checksum = "e712f64ec3850b98572bffac52e2c6f282b29fe6c5fa6d42334b30be438d95c1" [[package]] name = "home" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" +checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "http" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" dependencies = [ "bytes", - "fnv", "itoa", ] @@ -2033,13 +2184,14 @@ checksum = 
"df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "1.6.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" dependencies = [ + "atomic-waker", "bytes", "futures-channel", - "futures-util", + "futures-core", "h2", "http", "http-body", @@ -2047,6 +2199,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", + "pin-utils", "smallvec", "tokio", "want", @@ -2083,6 +2236,19 @@ dependencies = [ "tower-service", ] +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = "hyper-tls" version = "0.6.0" @@ -2101,9 +2267,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.14" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb" +checksum = "52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56" dependencies = [ "base64 0.22.1", "bytes", @@ -2117,7 +2283,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.6.1", "system-configuration", "tokio", "tower-service", @@ -2142,9 +2308,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.63" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2166,9 +2332,9 @@ 
dependencies = [ [[package]] name = "icu_collections" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" dependencies = [ "displaydoc", "potential_utf", @@ -2179,9 +2345,9 @@ dependencies = [ [[package]] name = "icu_locale_core" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" dependencies = [ "displaydoc", "litemap", @@ -2192,11 +2358,10 @@ dependencies = [ [[package]] name = "icu_normalizer" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" dependencies = [ - "displaydoc", "icu_collections", "icu_normalizer_data", "icu_properties", @@ -2207,42 +2372,38 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" dependencies = [ - "displaydoc", "icu_collections", "icu_locale_core", "icu_properties_data", "icu_provider", - "potential_utf", "zerotrie", "zerovec", ] [[package]] name = 
"icu_properties_data" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" +checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" [[package]] name = "icu_provider" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" dependencies = [ "displaydoc", "icu_locale_core", - "stable_deref_trait", - "tinystr", "writeable", "yoke", "zerofrom", @@ -2258,9 +2419,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", @@ -2290,13 +2451,14 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" dependencies = [ "equivalent", - "hashbrown 0.15.4", + "hashbrown 0.16.1", "serde", + "serde_core", ] [[package]] @@ -2331,9 +2493,9 @@ checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "iri-string" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" dependencies = [ "memchr", 
"serde", @@ -2341,13 +2503,13 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -2358,9 +2520,9 @@ checksum = "7655c9839580ee829dfacba1d1278c2b7883e50a277ff7541299489d6bdfdc45" [[package]] name = "is_terminal_polyfill" -version = "1.70.1" +version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" [[package]] name = "itertools" @@ -2381,26 +2543,35 @@ dependencies = [ ] [[package]] -name = "itoa" -version = "1.0.15" +name = "itertools" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "libc", ] [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" dependencies = [ "once_cell", "wasm-bindgen", @@ -2423,18 +2594,18 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.172" +version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] name = "libloading" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-targets 0.53.0", + "windows-link", ] [[package]] @@ -2445,20 +2616,20 @@ checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libredox" -version = "0.1.3" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" dependencies = [ - "bitflags 2.9.1", + "bitflags", "libc", - "redox_syscall 0.5.12", + "redox_syscall", ] [[package]] name = "libsqlite3-sys" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91632f3b4fb6bd1d72aa3d78f41ffecfcf2b1a6648d8c241dbe7dbfaf4875e15" +checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f" dependencies = [ "cc", "pkg-config", @@ -2467,9 +2638,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.22" +version = "1.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" dependencies = [ "cc", "pkg-config", @@ -2478,15 +2649,15 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.9.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "local-ip-address" @@ -2496,25 +2667,24 @@ checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" dependencies = [ "libc", "neli", - "thiserror 2.0.12", + "thiserror 2.0.17", "windows-sys 0.59.0", ] [[package]] name = "lock_api" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ - "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" dependencies = [ "value-bag", ] @@ -2525,7 +2695,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.4", + "hashbrown 0.15.5", ] [[package]] @@ -2536,9 +2706,9 @@ 
checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "miette" @@ -2567,7 +2737,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -2589,24 +2759,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", + "simd-adler32", ] [[package]] name = "mio" -version = "1.0.4" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" +checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" dependencies = [ "libc", - "wasi 0.11.1+wasi-snapshot-preview1", - "windows-sys 0.59.0", + "wasi", + "windows-sys 0.61.2", ] [[package]] name = "mockall" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" +checksum = "f58d964098a5f9c6b63d0798e5372fd04708193510a7af313c22e9f29b7b620b" dependencies = [ "cfg-if", "downcast", @@ -2618,14 +2789,14 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" +checksum = "ca41ce716dda6a9be188b385aa78ee5260fc25cd3802cb2a8afdc6afbe6b6dbf" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -2657,7 +2828,7 
@@ dependencies = [ "percent-encoding", "serde", "serde_json", - "socket2", + "socket2 0.5.10", "twox-hash", "url", ] @@ -2668,14 +2839,14 @@ version = "0.31.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63c3512cf11487168e0e9db7157801bf5273be13055a9cc95356dc9e0035e49c" dependencies = [ - "darling", + "darling 0.20.11", "heck", "num-bigint", "proc-macro-crate", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", "termcolor", "thiserror 1.0.69", ] @@ -2689,7 +2860,7 @@ dependencies = [ "base64 0.21.7", "bigdecimal", "bindgen", - "bitflags 2.9.1", + "bitflags", "bitvec", "btoi", "byteorder", @@ -2788,12 +2959,25 @@ checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" [[package]] name = "nu-ansi-term" -version = "0.46.0" +version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "overload", - "winapi", + "windows-sys 0.61.2", +] + +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", ] [[package]] @@ -2806,6 +2990,15 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + [[package]] name = "num-conv" version = "0.1.0" @@ -2821,6 +3014,28 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.19" @@ -2832,9 +3047,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.7" +version = "0.37.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" dependencies = [ "memchr", ] @@ -2847,9 +3062,9 @@ checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "once_cell_polyfill" -version = "1.70.1" +version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" [[package]] name = "oorandom" @@ -2859,11 +3074,11 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "openssl" -version = "0.10.73" +version = "0.10.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" dependencies = [ - "bitflags 2.9.1", + "bitflags", "cfg-if", "foreign-types", "libc", @@ -2880,7 +3095,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -2891,9 +3106,9 @@ checksum = 
"d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.109" +version = "0.9.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" dependencies = [ "cc", "libc", @@ -2902,16 +3117,20 @@ dependencies = [ ] [[package]] -name = "overload" -version = "0.1.1" +name = "owo-colors" +version = "4.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +checksum = "9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52" [[package]] -name = "owo-colors" -version = "4.2.1" +name = "page_size" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26995317201fa17f3656c36716aed4a7c81743a9634ac4c99c0eeda495db0cec" +checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" +dependencies = [ + "libc", + "winapi", +] [[package]] name = "parking" @@ -2921,9 +3140,9 @@ checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", "parking_lot_core", @@ -2931,15 +3150,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.11" +version = "0.9.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.12", + 
"redox_syscall", "smallvec", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -2964,7 +3183,7 @@ dependencies = [ "regex", "regex-syntax", "structmeta", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -2987,24 +3206,24 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "pem" -version = "3.0.5" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" dependencies = [ "base64 0.22.1", - "serde", + "serde_core", ] [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "phf" @@ -3044,6 +3263,26 @@ dependencies = [ "siphasher", ] +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "pin-project-lite" version = "0.2.16" @@ -3103,17 +3342,16 @@ dependencies = [ [[package]] name = "polling" -version = "3.8.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b53a684391ad002dd6a596ceb6c74fd004fdce75f4be2e3f615068abbea5fd50" +checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218" dependencies 
= [ "cfg-if", "concurrent-queue", "hermit-abi", "pin-project-lite", "rustix", - "tracing", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -3133,9 +3371,9 @@ dependencies = [ [[package]] name = "potential_utf" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" dependencies = [ "zerovec", ] @@ -3152,7 +3390,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.25", + "zerocopy 0.8.31", ] [[package]] @@ -3193,11 +3431,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit", + "toml_edit 0.23.7", ] [[package]] @@ -3219,14 +3457,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "proc-macro2" -version = "1.0.95" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" dependencies = [ "unicode-ident", ] @@ -3239,11 +3477,43 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", "version_check", "yansi", ] +[[package]] +name = "prost" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"101fec8d036f8d9d4a1e8ebf90d566d1d798f3b1aa379d2576a54a0d9acea5bd" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2d93e596a829ebe00afa41c3a056e6308d6b8a4c7d869edf184e2c91b1ba564" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "prost-types" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5d7b7346e150de32340ae3390b8b3ffa37ad93ec31fb5dad86afe817619e4e7" +dependencies = [ + "prost", +] + [[package]] name = "ptr_meta" version = "0.1.4" @@ -3277,18 +3547,18 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.40" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" dependencies = [ "proc-macro2", ] [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "r2d2" @@ -3313,9 +3583,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35006423374afbd4b270acddcbf1e28e60f6bdaaad10c2888b8fd2fba035213c" +checksum = "63417e83dc891797eea3ad379f52a5986da4bca0d6ef28baf4d14034dd111b0c" dependencies = [ "r2d2", "rusqlite", @@ -3341,9 +3611,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", @@ -3384,14 +3654,14 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", ] [[package]] name = "rayon" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ "either", "rayon-core", @@ -3399,9 +3669,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" dependencies = [ "crossbeam-deque", "crossbeam-utils", @@ -3409,27 +3679,38 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.3.5" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 1.3.2", + "bitflags", ] [[package]] -name = "redox_syscall" -version = "0.5.12" +name = "ref-cast" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" dependencies = [ - "bitflags 2.9.1", + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.25" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", ] [[package]] name = "regex" -version = "1.11.1" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", "memchr", @@ -3439,9 +3720,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", @@ -3450,9 +3731,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "relative-path" @@ -3471,9 +3752,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.20" +version = "0.12.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabf4c97d9130e2bf606614eb937e86edac8292eaa6f422f995d7e8de1eb1813" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" dependencies = [ "base64 0.22.1", "bytes", @@ -3571,10 +3852,21 @@ checksum = "6fc39292f8613e913f7df8fa892b8944ceb47c247b78e1b1ae2f09e019be789d" dependencies = [ "futures-timer", "futures-util", - "rstest_macros", + "rstest_macros 0.25.0", "rustc_version", ] +[[package]] +name = "rstest" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "f5a3193c063baaa2a95a33f03035c8a72b83d97a54916055ba22d35ed3839d49" +dependencies = [ + "futures-timer", + "futures-util", + "rstest_macros 0.26.1", +] + [[package]] name = "rstest_macros" version = "0.25.0" @@ -3589,17 +3881,35 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.102", + "syn 2.0.111", + "unicode-ident", +] + +[[package]] +name = "rstest_macros" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c845311f0ff7951c5506121a9ad75aec44d083c31583b2ea5a30bcb0b0abba0" +dependencies = [ + "cfg-if", + "glob", + "proc-macro-crate", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version", + "syn 2.0.111", "unicode-ident", ] [[package]] name = "rusqlite" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3de23c3319433716cf134eed225fe9986bc24f63bed9be9f20c329029e672dc7" +checksum = "165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f" dependencies = [ - "bitflags 2.9.1", + "bitflags", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -3609,9 +3919,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.37.1" +version = "1.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa7de2ba56ac291bd90c6b9bece784a52ae1411f9506544b3eae36dd2356d50" +checksum = "35affe401787a9bd846712274d97654355d21b2a2c092a3139aabe31e9022282" dependencies = [ "arrayvec", "borsh", @@ -3625,9 +3935,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" [[package]] name = "rustc-hash" @@ -3646,23 +3956,24 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.7" +version = "1.1.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ - "bitflags 2.9.1", + "bitflags", "errno", "libc", "linux-raw-sys", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "rustls" -version = "0.23.27" +version = "0.23.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" +checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" dependencies = [ + "log", "once_cell", "ring", "rustls-pki-types", @@ -3673,14 +3984,14 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.2.0", + "security-framework 3.5.1", ] [[package]] @@ -3694,18 +4005,18 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" dependencies = [ "zeroize", ] [[package]] name = "rustls-webpki" -version = "0.103.3" +version = "0.103.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" dependencies = [ "ring", "rustls-pki-types", @@ -3714,9 +4025,9 @@ dependencies = [ [[package]] name = "rustversion" -version 
= "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" @@ -3741,11 +4052,11 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -3757,6 +4068,30 @@ dependencies = [ "parking_lot", ] +[[package]] +name = "schemars" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "schemars" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -3775,7 +4110,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.9.1", + "bitflags", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -3784,11 +4119,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.2.0" +version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +checksum = 
"b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" dependencies = [ - "bitflags 2.9.1", + "bitflags", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -3797,9 +4132,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.14.0" +version = "2.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" dependencies = [ "core-foundation-sys", "libc", @@ -3807,16 +4142,17 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ + "serde_core", "serde_derive", ] @@ -3832,58 +4168,70 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.17" +version = "0.11.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96" +checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8" dependencies = [ "serde", + "serde_core", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "serde_html_form" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" +checksum = "b2f2d7ff8a2140333718bb329f5c40fc5f0865b84c426183ce14c97d2ab8154f" dependencies = [ "form_urlencoded", - "indexmap 2.9.0", + "indexmap 2.12.1", "itoa", "ryu", - "serde", + "serde_core", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.12.1", "itoa", "memchr", "ryu", "serde", + "serde_core", ] [[package]] name = "serde_path_to_error" -version = "0.1.17" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" dependencies = [ "itoa", "serde", + "serde_core", ] [[package]] @@ -3894,7 +4242,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -3906,6 +4254,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" +dependencies = [ + "serde_core", +] + [[package]] name = 
"serde_urlencoded" version = "0.7.1" @@ -3920,17 +4277,18 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.12.0" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" +checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.9.0", - "serde", - "serde_derive", + "indexmap 2.12.1", + "schemars 0.9.0", + "schemars 1.1.0", + "serde_core", "serde_json", "serde_with_macros", "time", @@ -3938,14 +4296,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.12.0" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" +checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" dependencies = [ - "darling", + "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -3987,13 +4345,19 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.5" +version = "1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" dependencies = [ "libc", ] +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + [[package]] name = "simdutf8" version = "0.1.5" @@ -4008,12 +4372,9 @@ checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" [[package]] name = "slab" -version = "0.4.9" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "smallvec" @@ -4031,11 +4392,21 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + [[package]] name = "stable_deref_trait" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "static_assertions" @@ -4058,7 +4429,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -4069,7 +4440,7 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -4122,9 +4493,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.102" +version = "2.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6397daf94fa90f058bd0fd88429dd9e5738999cca8d701813c80723add80462" +checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" dependencies = [ "proc-macro2", "quote", @@ -4148,7 +4519,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -4157,7 +4528,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.9.1", + "bitflags", "core-foundation 
0.9.4", "system-configuration-sys", ] @@ -4197,15 +4568,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.20.0" +version = "3.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ "fastrand", - "getrandom 0.3.3", + "getrandom 0.3.4", "once_cell", "rustix", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -4219,12 +4590,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" +checksum = "60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0" dependencies = [ "rustix", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -4235,18 +4606,20 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "testcontainers" -version = "0.24.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bb7577dca13ad86a78e8271ef5d322f37229ec83b8d98da6d996c588a1ddb1" +checksum = "a347cac4368ba4f1871743adb27dc14829024d26b1763572404726b0b9943eb8" dependencies = [ + "astral-tokio-tar", "async-trait", "bollard", - "bollard-stubs", "bytes", "docker_credential", "either", "etcetera", + "ferroid", "futures", + "itertools 0.14.0", "log", "memchr", "parse-display", @@ -4254,10 +4627,9 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tokio-stream", - "tokio-tar", "tokio-util", "url", ] @@ -4269,7 +4641,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" dependencies = [ "unicode-linebreak", - "unicode-width 0.2.1", + "unicode-width 
0.2.2", ] [[package]] @@ -4283,11 +4655,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.12" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ - "thiserror-impl 2.0.12", + "thiserror-impl 2.0.17", ] [[package]] @@ -4298,35 +4670,34 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "thiserror-impl" -version = "2.0.12" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", - "once_cell", ] [[package]] name = "time" -version = "0.3.41" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", "itoa", @@ -4339,15 +4710,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" 
[[package]] name = "time-macros" -version = "0.2.22" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" dependencies = [ "num-conv", "time-core", @@ -4355,9 +4726,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" dependencies = [ "displaydoc", "zerovec", @@ -4375,9 +4746,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" dependencies = [ "tinyvec_macros", ] @@ -4390,30 +4761,29 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.45.1" +version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ - "backtrace", "bytes", "libc", "mio", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.6.1", "tokio-macros", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -4428,9 +4798,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ "rustls", "tokio", @@ -4447,26 +4817,11 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-tar" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5714c010ca3e5c27114c1cdeb9d14641ace49874aa5626d7149e47aedace75" -dependencies = [ - "filetime", - "futures-core", - "libc", - "redox_syscall 0.3.5", - "tokio", - "tokio-stream", - "xattr", -] - [[package]] name = "tokio-util" -version = "0.7.15" +version = "0.7.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" dependencies = [ "bytes", "futures-core", @@ -4482,9 +4837,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", - "serde_spanned", - "toml_datetime", - "toml_edit", + "serde_spanned 0.6.9", + "toml_datetime 0.6.11", + "toml_edit 0.22.27", +] + +[[package]] +name = "toml" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" +dependencies = [ + "indexmap 2.12.1", + "serde_core", + "serde_spanned 1.0.3", + "toml_datetime 0.7.3", + "toml_parser", + "toml_writer", + "winnow", ] [[package]] @@ -4496,26 +4866,102 @@ dependencies = [ "serde", ] +[[package]] +name = "toml_datetime" +version = "0.7.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +dependencies = [ + "serde_core", +] + [[package]] name = "toml_edit" version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.12.1", "serde", - "serde_spanned", - "toml_datetime", + "serde_spanned 0.6.9", + "toml_datetime 0.6.11", "toml_write", "winnow", ] +[[package]] +name = "toml_edit" +version = "0.23.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +dependencies = [ + "indexmap 2.12.1", + "toml_datetime 0.7.3", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +dependencies = [ + "winnow", +] + [[package]] name = "toml_write" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" +[[package]] +name = "toml_writer" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" + +[[package]] +name = "tonic" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" +dependencies = [ + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "socket2 0.6.1", + "sync_wrapper", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + 
"tracing", +] + +[[package]] +name = "tonic-prost" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +dependencies = [ + "bytes", + "prost", + "tonic", +] + [[package]] name = "torrust-axum-health-check-api-server" version = "3.0.0-develop" @@ -4561,7 +5007,7 @@ dependencies = [ "hyper", "local-ip-address", "percent-encoding", - "rand 0.9.1", + "rand 0.9.2", "reqwest", "serde", "serde_bencode", @@ -4605,7 +5051,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "torrust-axum-server", "torrust-rest-tracker-api-client", @@ -4636,7 +5082,7 @@ dependencies = [ "hyper", "hyper-util", "pin-project-lite", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "torrust-server-lib", "torrust-tracker-configuration", @@ -4652,7 +5098,7 @@ dependencies = [ "hyper", "reqwest", "serde", - "thiserror 2.0.12", + "thiserror 2.0.17", "url", "uuid", ] @@ -4680,7 +5126,7 @@ name = "torrust-server-lib" version = "3.0.0-develop" dependencies = [ "derive_more", - "rstest", + "rstest 0.25.0", "tokio", "torrust-tracker-primitives", "tower-http", @@ -4702,12 +5148,12 @@ dependencies = [ "clap", "local-ip-address", "mockall", - "rand 0.9.1", + "rand 0.9.2", "regex", "reqwest", "serde", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tokio-util", "torrust-axum-health-check-api-server", @@ -4743,7 +5189,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "torrust-tracker-configuration", "tracing", @@ -4771,8 +5217,8 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.12", - "toml", + "thiserror 2.0.17", + "toml 0.9.8", "torrust-tracker-located-error", "tracing", "tracing-subscriber", @@ -4784,8 +5230,8 @@ dependencies = [ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ - 
"criterion 0.6.0", - "thiserror 2.0.12", + "criterion 0.8.0", + "thiserror 2.0.17", ] [[package]] @@ -4801,7 +5247,7 @@ dependencies = [ name = "torrust-tracker-located-error" version = "3.0.0-develop" dependencies = [ - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", ] @@ -4814,10 +5260,10 @@ dependencies = [ "derive_more", "formatjson", "pretty_assertions", - "rstest", + "rstest 0.25.0", "serde", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.17", "torrust-tracker-primitives", "tracing", ] @@ -4830,11 +5276,11 @@ dependencies = [ "binascii", "bittorrent-primitives", "derive_more", - "rstest", + "rstest 0.25.0", "serde", "tdyne-peer-id", "tdyne-peer-id-registry", - "thiserror 2.0.12", + "thiserror 2.0.17", "torrust-tracker-configuration", "url", "zerocopy 0.7.35", @@ -4848,14 +5294,14 @@ dependencies = [ "async-std", "bittorrent-primitives", "chrono", - "criterion 0.6.0", + "criterion 0.8.0", "crossbeam-skiplist", "futures", "mockall", - "rand 0.9.1", - "rstest", + "rand 0.9.2", + "rstest 0.26.1", "serde", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tokio-util", "torrust-tracker-clock", @@ -4871,7 +5317,7 @@ dependencies = [ name = "torrust-tracker-test-helpers" version = "3.0.0-develop" dependencies = [ - "rand 0.9.1", + "rand 0.9.2", "torrust-tracker-configuration", "tracing", "tracing-subscriber", @@ -4884,12 +5330,12 @@ dependencies = [ "aquatic_udp_protocol", "async-std", "bittorrent-primitives", - "criterion 0.6.0", + "criterion 0.8.0", "crossbeam-skiplist", "dashmap", "futures", "parking_lot", - "rstest", + "rstest 0.26.1", "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", @@ -4911,10 +5357,10 @@ dependencies = [ "futures-util", "local-ip-address", "mockall", - "rand 0.9.1", + "rand 0.9.2", "ringbuf", "serde", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tokio-util", "torrust-server-lib", @@ -4939,9 +5385,12 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ 
"futures-core", "futures-util", + "indexmap 2.12.1", "pin-project-lite", + "slab", "sync_wrapper", "tokio", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -4949,12 +5398,12 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +checksum = "9cf146f99d442e8e68e585f5d798ccd3cad9a7835b917e09728880a862706456" dependencies = [ "async-compression", - "bitflags 2.9.1", + "bitflags", "bytes", "futures-core", "futures-util", @@ -4985,9 +5434,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" dependencies = [ "log", "pin-project-lite", @@ -4997,20 +5446,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.29" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1ffbcf9c6f6b99d386e7444eb608ba646ae452a36b39737deb9663b610f662" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "tracing-core" -version = "0.1.34" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" dependencies = [ "once_cell", "valuable", @@ -5039,9 +5488,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.19" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" dependencies = [ "nu-ansi-term", "serde", @@ -5073,9 +5522,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" [[package]] name = "uncased" @@ -5088,9 +5537,9 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "unicode-linebreak" @@ -5106,9 +5555,9 @@ checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-width" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" +checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" [[package]] name = "unicode-xid" @@ -5122,11 +5571,39 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +[[package]] +name = "ureq" +version = "3.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d39cb1dbab692d82a977c0392ffac19e188bd9186a9f32806f0aaa859d75585a" +dependencies = [ + "base64 0.22.1", + "log", + "percent-encoding", + "rustls", + "rustls-pki-types", + "ureq-proto", + "utf-8", + "webpki-roots", +] + +[[package]] +name = "ureq-proto" +version = "0.5.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d81f9efa9df032be5934a46a068815a10a042b494b6a58cb0a1a97bb5467ed6f" +dependencies = [ + "base64 0.22.1", + "http", + "httparse", + "log", +] + [[package]] name = "url" -version = "2.5.4" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" dependencies = [ "form_urlencoded", "idna", @@ -5134,6 +5611,12 @@ dependencies = [ "serde", ] +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + [[package]] name = "utf8_iter" version = "1.0.4" @@ -5148,13 +5631,13 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.17.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "js-sys", - "rand 0.9.1", + "rand 0.9.2", "wasm-bindgen", ] @@ -5166,9 +5649,9 @@ checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "value-bag" -version = "1.11.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "943ce29a8a743eb10d6082545d861b24f9d1b160b7d741e0f2cdf726bec909c5" +checksum = "7ba6f5989077681266825251a52748b8c1d8a4ad098cc37e440103d0ea717fc0" [[package]] name = "vcpkg" @@ -5208,45 +5691,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] -name = "wasi" -version = "0.14.2+wasi-0.2.4" +name = 
"wasip2" +version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.102", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" dependencies = [ "cfg-if", "js-sys", @@ -5257,9 +5727,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5267,36 +5737,55 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ + "bumpalo", "proc-macro2", "quote", - "syn 2.0.102", - "wasm-bindgen-backend", + "syn 2.0.111", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" dependencies = [ "unicode-ident", ] [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "winapi" version = "0.3.9" @@ -5315,11 +5804,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -5330,9 +5819,9 
@@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.61.2" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", @@ -5343,37 +5832,37 @@ dependencies = [ [[package]] name = "windows-implement" -version = "0.60.0" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "windows-interface" -version = "0.59.1" +version = "0.59.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "windows-link" -version = "0.1.1" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-registry" -version = "0.5.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3bab093bdd303a1240bb99b8aba8ea8a69ee19d34c9e2ef9594e708a4878820" +checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720" dependencies = [ "windows-link", "windows-result", @@ -5382,18 +5871,18 @@ dependencies = [ [[package]] name = "windows-result" -version = "0.3.4" +version = "0.4.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" -version = "0.4.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ "windows-link", ] @@ -5416,6 +5905,24 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-targets" version = "0.52.6" @@ -5434,18 +5941,19 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.0" +version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 
0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", ] [[package]] @@ -5456,9 +5964,9 @@ checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" @@ -5468,9 +5976,9 @@ checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_aarch64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" @@ -5480,9 +5988,9 @@ checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" [[package]] name = "windows_i686_gnullvm" @@ -5492,9 +6000,9 @@ checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" @@ -5504,9 +6012,9 @@ checksum = 
"240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_i686_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" @@ -5516,9 +6024,9 @@ checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" @@ -5528,9 +6036,9 @@ checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" @@ -5540,33 +6048,30 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "windows_x86_64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" -version = "0.7.11" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" +checksum = 
"5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" dependencies = [ "memchr", ] [[package]] -name = "wit-bindgen-rt" -version = "0.39.0" +name = "wit-bindgen" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" -dependencies = [ - "bitflags 2.9.1", -] +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "writeable" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" [[package]] name = "wyz" @@ -5579,9 +6084,9 @@ dependencies = [ [[package]] name = "xattr" -version = "1.5.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d65cbf2f12c15564212d48f4e3dfb87923d25d611f2aed18f4cb23f0413d89e" +checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" dependencies = [ "libc", "rustix", @@ -5595,11 +6100,10 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" dependencies = [ - "serde", "stable_deref_trait", "yoke-derive", "zerofrom", @@ -5607,13 +6111,13 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + 
"syn 2.0.111", "synstructure", ] @@ -5629,11 +6133,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.25" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" dependencies = [ - "zerocopy-derive 0.8.25", + "zerocopy-derive 0.8.31", ] [[package]] @@ -5644,18 +6148,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "zerocopy-derive" -version = "0.8.25" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -5675,21 +6179,21 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", "synstructure", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" [[package]] name = "zerotrie" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" dependencies = [ "displaydoc", "yoke", @@ -5698,9 +6202,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.2" +version = "0.11.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" dependencies = [ "yoke", "zerofrom", @@ -5709,13 +6213,13 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -5738,9 +6242,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.15+zstd.1.5.7" +version = "2.0.16+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" dependencies = [ "cc", "pkg-config", From 00db8233574ccfc07f6de996da3fd9c3c3a19b67 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 2 Dec 2025 09:55:01 +0000 Subject: [PATCH 203/247] chore(deps): bump actions/checkout from 4 to 6 --- .github/workflows/container.yaml | 2 +- .github/workflows/coverage.yaml | 2 +- .github/workflows/deployment.yaml | 4 ++-- .github/workflows/generate_coverage_pr.yaml | 2 +- .github/workflows/labels.yaml | 2 +- .github/workflows/testing.yaml | 10 +++++----- .github/workflows/upload_coverage_pr.yaml | 2 +- 7 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 9f51f3124..7416df71e 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -46,7 +46,7 @@ jobs: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: compose name: Compose diff --git 
a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index e10c5ac66..2c8d63d6c 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Install LLVM tools run: sudo apt-get update && sudo apt-get install -y llvm diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 4e8fd579b..b544d1da2 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -17,7 +17,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain @@ -42,7 +42,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain diff --git a/.github/workflows/generate_coverage_pr.yaml b/.github/workflows/generate_coverage_pr.yaml index d1b241b9d..8363376b2 100644 --- a/.github/workflows/generate_coverage_pr.yaml +++ b/.github/workflows/generate_coverage_pr.yaml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Install LLVM tools run: sudo apt-get update && sudo apt-get install -y llvm diff --git a/.github/workflows/labels.yaml b/.github/workflows/labels.yaml index bb8283f30..a312c335f 100644 --- a/.github/workflows/labels.yaml +++ b/.github/workflows/labels.yaml @@ -25,7 +25,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: sync name: Apply Labels from File diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 671864fc9..c9328d890 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -15,7 +15,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - 
id: setup name: Setup Toolchain @@ -44,7 +44,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain @@ -96,7 +96,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain @@ -119,7 +119,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain @@ -173,7 +173,7 @@ jobs: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: test name: Run E2E Tests diff --git a/.github/workflows/upload_coverage_pr.yaml b/.github/workflows/upload_coverage_pr.yaml index 1ed2f7bcc..a2a3c82a6 100644 --- a/.github/workflows/upload_coverage_pr.yaml +++ b/.github/workflows/upload_coverage_pr.yaml @@ -96,7 +96,7 @@ jobs: echo "override_commit=$(> "$GITHUB_OUTPUT" - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: ref: ${{ steps.parse_previous_artifacts.outputs.override_commit || '' }} path: repo_root From 6757705ab631c4418eca9cb75b5bc24db1e84ee5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 2 Dec 2025 09:57:21 +0000 Subject: [PATCH 204/247] chore(deps): bump actions/upload-artifact from 4 to 5 --- .github/workflows/generate_coverage_pr.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/generate_coverage_pr.yaml b/.github/workflows/generate_coverage_pr.yaml index 8363376b2..6942e276f 100644 --- a/.github/workflows/generate_coverage_pr.yaml +++ b/.github/workflows/generate_coverage_pr.yaml @@ -59,13 +59,13 @@ jobs: # Triggered sub-workflow is not able to detect the original commit/PR which is available # in this workflow. 
- name: Store PR number - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: pr_number path: pr_number.txt - name: Store commit SHA - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: commit_sha path: commit_sha.txt @@ -74,7 +74,7 @@ jobs: # is executed by a different workflow `upload_coverage.yml`. The reason for this # split is because `on.pull_request` workflows don't have access to secrets. - name: Store coverage report in artifacts - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: codecov_report path: ./codecov.json From 46b245004abea6bbf16afdadadbf9baa617797a0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 2 Dec 2025 09:57:29 +0000 Subject: [PATCH 205/247] chore(deps): bump actions/github-script from 7 to 8 --- .github/workflows/upload_coverage_pr.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/upload_coverage_pr.yaml b/.github/workflows/upload_coverage_pr.yaml index a2a3c82a6..8b0006a6d 100644 --- a/.github/workflows/upload_coverage_pr.yaml +++ b/.github/workflows/upload_coverage_pr.yaml @@ -22,7 +22,7 @@ jobs: steps: - name: "Download existing coverage report" id: prepare_report - uses: actions/github-script@v7 + uses: actions/github-script@v8 with: script: | var fs = require('fs'); From 6cb7cdd932385a160c8b2da76842909a2277b04b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 2 Dec 2025 10:03:07 +0000 Subject: [PATCH 206/247] chore(deps): update Cargo dependencies MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - tokio: 1.45.1 → 1.48.0 - reqwest: 0.12.20 → 0.12.24 - clap: 4.5.40 → 4.5.53 - tracing-subscriber: 0.3.19 → 0.3.22 - ringbuf: 0.4.4 → 0.4.8 - uuid: 1.18.1 → 1.19.0 - and other transitive dependencies Related dependabot PRs: - #1629 (tokio) - #1630 (reqwest) - #1623 (clap) - #1614 (tracing-subscriber) - #1604 (ringbuf) --- Cargo.lock | 16 ++++++++-------- 1 file 
changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 62d10c72f..952e1d8a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3484,9 +3484,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.14.2" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "101fec8d036f8d9d4a1e8ebf90d566d1d798f3b1aa379d2576a54a0d9acea5bd" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" dependencies = [ "bytes", "prost-derive", @@ -3494,9 +3494,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.14.2" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d93e596a829ebe00afa41c3a056e6308d6b8a4c7d869edf184e2c91b1ba564" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3507,9 +3507,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.14.2" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5d7b7346e150de32340ae3390b8b3ffa37ad93ec31fb5dad86afe817619e4e7" +checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" dependencies = [ "prost", ] @@ -5631,9 +5631,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.18.1" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" +checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" dependencies = [ "getrandom 0.3.4", "js-sys", From a2f9657ddb1d773d1067132dc0f7a2207b99c2c9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:26:22 +0000 Subject: [PATCH 207/247] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 49 packages to latest compatible versions Updating 
async-compression v0.4.34 -> v0.4.36 Updating async-lock v3.4.1 -> v3.4.2 Updating axum v0.8.7 -> v0.8.8 Updating axum-extra v0.12.2 -> v0.12.3 Updating axum-server v0.7.3 -> v0.8.0 Updating bumpalo v3.19.0 -> v3.19.1 Updating cc v1.2.48 -> v1.2.50 Updating cmake v0.1.54 -> v0.1.57 Updating compression-codecs v0.4.33 -> v0.4.35 Adding convert_case v0.10.0 Updating criterion v0.8.0 -> v0.8.1 Updating criterion-plot v0.8.0 -> v0.8.1 Adding derive_builder v0.20.2 Adding derive_builder_core v0.20.2 Adding derive_builder_macro v0.20.2 Updating derive_more v2.0.1 -> v2.1.0 Updating derive_more-impl v2.0.1 -> v2.1.0 Updating ferroid v0.8.7 -> v0.8.8 Updating fs-err v3.2.0 -> v3.2.1 Adding getset v0.1.6 Updating hyper-util v0.1.18 -> v0.1.19 Updating icu_properties v2.1.1 -> v2.1.2 Updating icu_properties_data v2.1.1 -> v2.1.2 Updating itoa v1.0.15 -> v1.0.16 Updating libc v0.2.177 -> v0.2.178 Updating libredox v0.1.10 -> v0.1.11 Updating local-ip-address v0.6.5 -> v0.6.8 Updating log v0.4.28 -> v0.4.29 Updating mio v1.1.0 -> v1.1.1 Updating neli v0.6.5 -> v0.7.3 Updating neli-proc-macros v0.1.4 -> v0.2.2 Updating portable-atomic v1.11.1 -> v1.12.0 Adding redox_syscall v0.6.0 Updating reqwest v0.12.24 -> v0.12.26 Updating rustls-pki-types v1.13.1 -> v1.13.2 Updating ryu v1.0.20 -> v1.0.21 Updating serde_spanned v1.0.3 -> v1.0.4 Updating simd-adler32 v0.3.7 -> v0.3.8 Updating supports-hyperlinks v3.1.0 -> v3.2.0 Updating testcontainers v0.26.0 -> v0.26.2 Updating toml v0.9.8 -> v0.9.10+spec-1.1.0 Updating toml_datetime v0.7.3 -> v0.7.5+spec-1.1.0 Updating toml_edit v0.23.7 -> v0.23.10+spec-1.0.0 Updating toml_parser v1.0.4 -> v1.0.6+spec-1.1.0 Updating toml_writer v1.0.4 -> v1.0.6+spec-1.1.0 Updating tower-http v0.6.7 -> v0.6.8 Updating tracing v0.1.43 -> v0.1.44 Updating tracing-core v0.1.35 -> v0.1.36 Adding unicode-segmentation v1.12.0 Removing windows-sys v0.59.0 note: pass `--verbose` to see 7 unchanged dependencies behind latest ``` --- Cargo.lock | 278 
++++++++++++++++++++++++++++++++--------------------- 1 file changed, 171 insertions(+), 107 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 952e1d8a6..da0910f48 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -236,9 +236,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.34" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e86f6d3dc9dc4352edeea6b8e499e13e3f5dc3b964d7ca5fd411415a3498473" +checksum = "98ec5f6c2f8bc326c994cb9e241cc257ddaba9afa8555a43cffbb5dd86efaa37" dependencies = [ "compression-codecs", "compression-core", @@ -297,9 +297,9 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.4.1" +version = "3.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc" +checksum = "290f7f2596bd5b78a9fec8088ccd89180d7f9f55b94b0576823bbbdc72ee8311" dependencies = [ "event-listener 5.4.1", "event-listener-strategy", @@ -395,9 +395,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "axum" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425" +checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8" dependencies = [ "axum-core", "axum-macros", @@ -459,9 +459,9 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbfe9f610fe4e99cf0cfcd03ccf8c63c28c616fe714d80475ef731f3b13dd21b" +checksum = "6dfbd6109d91702d55fc56df06aae7ed85c465a7a451db6c0e54a4b9ca5983d1" dependencies = [ "axum", "axum-core", @@ -495,12 +495,13 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c1ab4a3ec9ea8a657c72d99a03a824af695bd0fb5ec639ccbd9cd3543b41a5f9" +checksum = "b1df331683d982a0b9492b38127151e6453639cd34926eb9c07d4cd8c6d22bfc" dependencies = [ "arc-swap", "bytes", + "either", "fs-err", "http", "http-body", @@ -508,7 +509,6 @@ dependencies = [ "hyper-util", "pin-project-lite", "rustls", - "rustls-pemfile", "rustls-pki-types", "tokio", "tokio-rustls", @@ -948,9 +948,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.19.0" +version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" [[package]] name = "bytecheck" @@ -1018,9 +1018,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.48" +version = "1.2.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c481bdbf0ed3b892f6f806287d72acd515b352a4ec27a208489b8c1bc839633a" +checksum = "9f50d563227a1c37cc0a263f64eca3334388c01c5e4c4861a9def205c614383c" dependencies = [ "find-msvc-tools", "jobserver", @@ -1151,9 +1151,9 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" [[package]] name = "cmake" -version = "0.1.54" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" dependencies = [ "cc", ] @@ -1179,9 +1179,9 @@ dependencies = [ [[package]] name = "compression-codecs" -version = "0.4.33" +version = "0.4.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302266479cb963552d11bd042013a58ef1adc56768016c8b82b4199488f2d4ad" +checksum = "b0f7ac3e5b97fdce45e8922fb05cae2c37f7bbd63d30dd94821dacfd8f3f2bf2" dependencies = [ "brotli", "compression-core", @@ -1206,6 +1206,15 @@ 
dependencies = [ "crossbeam-utils", ] +[[package]] +name = "convert_case" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -1280,16 +1289,16 @@ dependencies = [ [[package]] name = "criterion" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0dfe5e9e71bdcf4e4954f7d14da74d1cdb92a3a07686452d1509652684b1aab" +checksum = "4d883447757bb0ee46f233e9dc22eb84d93a9508c9b868687b274fc431d886bf" dependencies = [ "alloca", "anes", "cast", "ciborium", "clap", - "criterion-plot 0.8.0", + "criterion-plot 0.8.1", "itertools 0.13.0", "num-traits", "oorandom", @@ -1316,9 +1325,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de36c2bee19fba779808f92bf5d9b0fa5a40095c277aba10c458a12b35d21d6" +checksum = "ed943f81ea2faa8dcecbbfa50164acf95d555afec96a27871663b300e387b2e4" dependencies = [ "cast", "itertools 0.13.0", @@ -1500,23 +1509,56 @@ dependencies = [ "serde_core", ] +[[package]] +name = "derive_builder" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling 0.20.11", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn 2.0.111", +] + [[package]] name = "derive_more" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +checksum = "10b768e943bed7bf2cab53df09f4bc34bfd217cdb57d971e769874c9a6710618" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +checksum = "6d286bfdaf75e988b4a78e013ecd79c581e06399ab53fbacd2d916c2f904f30b" dependencies = [ + "convert_case", "proc-macro2", "quote", + "rustc_version", "syn 2.0.111", "unicode-xid", ] @@ -1680,9 +1722,9 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "ferroid" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0e9414a6ae93ef993ce40a1e02944f13d4508e2bf6f1ced1580ce6910f08253" +checksum = "ce161062fb044bd629c2393590efd47cab8d0241faf15704ffb0d47b7b4e4a35" dependencies = [ "portable-atomic", "rand 0.9.2", @@ -1854,9 +1896,9 @@ dependencies = [ [[package]] name = "fs-err" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62d91fd049c123429b018c47887d3f75a265540dd3c30ba9cb7bae9197edb03a" +checksum = "824f08d01d0f496b3eca4f001a13cf17690a6ee930043d20817f547455fd98f8" dependencies = [ "autocfg", "tokio", @@ -2009,6 +2051,18 @@ dependencies = [ "wasip2", ] +[[package]] +name = "getset" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + 
"syn 2.0.111", +] + [[package]] name = "gimli" version = "0.32.3" @@ -2267,9 +2321,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56" +checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ "base64 0.22.1", "bytes", @@ -2378,9 +2432,9 @@ checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" dependencies = [ "icu_collections", "icu_locale_core", @@ -2392,9 +2446,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" [[package]] name = "icu_provider" @@ -2553,9 +2607,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "7ee5b5339afb4c41626dde77b7a611bd4f2c202b897852b4bcf5d03eddc61010" [[package]] name = "jobserver" @@ -2594,9 +2648,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.177" +version = "0.2.178" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" +checksum = 
"37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" [[package]] name = "libloading" @@ -2616,13 +2670,13 @@ checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libredox" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +checksum = "df15f6eac291ed1cf25865b1ee60399f57e7c227e7f51bdbd4c5270396a9ed50" dependencies = [ "bitflags", "libc", - "redox_syscall", + "redox_syscall 0.6.0", ] [[package]] @@ -2661,14 +2715,14 @@ checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "local-ip-address" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" +checksum = "0a60bf300a990b2d1ebdde4228e873e8e4da40d834adbf5265f3da1457ede652" dependencies = [ "libc", "neli", "thiserror 2.0.17", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -2682,9 +2736,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" dependencies = [ "value-bag", ] @@ -2764,9 +2818,9 @@ dependencies = [ [[package]] name = "mio" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" dependencies = [ "libc", "wasi", @@ -2918,27 +2972,31 @@ dependencies = [ [[package]] name = "neli" -version = "0.6.5" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9" +checksum = "e23bebbf3e157c402c4d5ee113233e5e0610cc27453b2f07eefce649c7365dcc" dependencies = [ + "bitflags", "byteorder", + "derive_builder", + "getset", "libc", "log", "neli-proc-macros", + "parking_lot", ] [[package]] name = "neli-proc-macros" -version = "0.1.4" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe" +checksum = "05d8d08c6e98f20a62417478ebf7be8e1425ec9acecc6f63e22da633f6b71609" dependencies = [ "either", "proc-macro2", "quote", "serde", - "syn 1.0.109", + "syn 2.0.111", ] [[package]] @@ -3156,7 +3214,7 @@ checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.18", "smallvec", "windows-link", ] @@ -3356,9 +3414,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" +checksum = "f59e70c4aef1e55797c2e8fd94a4f2a973fc972cfde0e0b05f683667b0cd39dd" [[package]] name = "portable-atomic-util" @@ -3435,7 +3493,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.23.7", + "toml_edit 0.23.10+spec-1.0.0", ] [[package]] @@ -3686,6 +3744,15 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec96166dafa0886eb81fe1c0a388bece180fbef2135f97c1e2cf8302e74b43b5" +dependencies = [ + "bitflags", +] + [[package]] name = "ref-cast" version = "1.0.25" @@ -3752,9 +3819,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.24" +version = 
"0.12.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" +checksum = "3b4c14b2d9afca6a60277086b0cc6a6ae0b568f6f7916c943a8cdc79f8be240f" dependencies = [ "base64 0.22.1", "bytes", @@ -4005,9 +4072,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" +checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" dependencies = [ "zeroize", ] @@ -4031,9 +4098,9 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = "62049b2877bf12821e8f9ad256ee38fdc31db7387ec2d3b3f403024de2034aea" [[package]] name = "same-file" @@ -4256,9 +4323,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" +checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" dependencies = [ "serde_core", ] @@ -4354,9 +4421,9 @@ dependencies = [ [[package]] name = "simd-adler32" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" [[package]] name = "simdutf8" @@ -4470,9 +4537,9 @@ dependencies = [ [[package]] name = "supports-hyperlinks" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"804f44ed3c63152de6a9f90acbea1a110441de43006ea51bcce8f436196a288b" +checksum = "e396b6523b11ccb83120b115a0b7366de372751aa6edf19844dfb13a6af97e91" [[package]] name = "supports-unicode" @@ -4606,9 +4673,9 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "testcontainers" -version = "0.26.0" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a347cac4368ba4f1871743adb27dc14829024d26b1763572404726b0b9943eb8" +checksum = "1483605f58b2fff80d786eb56a0b6b4e8b1e5423fbc9ec2e3e562fa2040d6f27" dependencies = [ "astral-tokio-tar", "async-trait", @@ -4844,14 +4911,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.8" +version = "0.9.10+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" +checksum = "0825052159284a1a8b4d6c0c86cbc801f2da5afd2b225fa548c72f2e74002f48" dependencies = [ "indexmap 2.12.1", "serde_core", - "serde_spanned 1.0.3", - "toml_datetime 0.7.3", + "serde_spanned 1.0.4", + "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "toml_writer", "winnow", @@ -4868,9 +4935,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.7.3" +version = "0.7.5+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" dependencies = [ "serde_core", ] @@ -4891,21 +4958,21 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.23.7" +version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ "indexmap 2.12.1", - "toml_datetime 0.7.3", + "toml_datetime 0.7.5+spec-1.1.0", 
"toml_parser", "winnow", ] [[package]] name = "toml_parser" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" dependencies = [ "winnow", ] @@ -4918,9 +4985,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "toml_writer" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" +checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" [[package]] name = "tonic" @@ -5218,7 +5285,7 @@ dependencies = [ "serde_json", "serde_with", "thiserror 2.0.17", - "toml 0.9.8", + "toml 0.9.10+spec-1.1.0", "torrust-tracker-located-error", "tracing", "tracing-subscriber", @@ -5230,7 +5297,7 @@ dependencies = [ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ - "criterion 0.8.0", + "criterion 0.8.1", "thiserror 2.0.17", ] @@ -5294,7 +5361,7 @@ dependencies = [ "async-std", "bittorrent-primitives", "chrono", - "criterion 0.8.0", + "criterion 0.8.1", "crossbeam-skiplist", "futures", "mockall", @@ -5330,7 +5397,7 @@ dependencies = [ "aquatic_udp_protocol", "async-std", "bittorrent-primitives", - "criterion 0.8.0", + "criterion 0.8.1", "crossbeam-skiplist", "dashmap", "futures", @@ -5398,9 +5465,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf146f99d442e8e68e585f5d798ccd3cad9a7835b917e09728880a862706456" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ "async-compression", "bitflags", @@ -5434,9 +5501,9 @@ checksum = 
"8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "log", "pin-project-lite", @@ -5457,9 +5524,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.35" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", "valuable", @@ -5547,6 +5614,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + [[package]] name = "unicode-width" version = "0.1.14" @@ -5896,15 +5969,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", -] - [[package]] name = "windows-sys" version = "0.60.2" From 4c16227bdaa03c236cc597b93dc49563224e0afe Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:35:13 +0000 Subject: [PATCH 208/247] fix: E0107 - missing generics for struct axum_server::Server in from_tcp_with_timeouts error[E0107]: missing generics for struct `axum_server::Server` --> packages/axum-server/src/custom_axum_server.rs:44:55 | 44 | pub fn 
from_tcp_with_timeouts(socket: TcpListener) -> Server { | ^^^^^^ expected at least 1 generic argument Added SocketAddr generic parameter to Server return type and Address trait bound to add_timeouts function. --- packages/axum-server/src/custom_axum_server.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/axum-server/src/custom_axum_server.rs b/packages/axum-server/src/custom_axum_server.rs index 5705ef24e..fccaf54dc 100644 --- a/packages/axum-server/src/custom_axum_server.rs +++ b/packages/axum-server/src/custom_axum_server.rs @@ -18,7 +18,7 @@ //! If you want to know more about Axum and timeouts see . use std::future::Ready; use std::io::ErrorKind; -use std::net::TcpListener; +use std::net::{SocketAddr, TcpListener}; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; @@ -41,7 +41,7 @@ const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(5); #[must_use] -pub fn from_tcp_with_timeouts(socket: TcpListener) -> Server { +pub fn from_tcp_with_timeouts(socket: TcpListener) -> Server { add_timeouts(axum_server::from_tcp(socket)) } @@ -50,7 +50,7 @@ pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> add_timeouts(axum_server::from_tcp_rustls(socket, tls)) } -fn add_timeouts(mut server: Server) -> Server { +fn add_timeouts(mut server: Server) -> Server { server.http_builder().http1().timer(TokioTimer::new()); server.http_builder().http2().timer(TokioTimer::new()); From 51452a8b2a7aee04822e90651179c2b0a8bb031f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:38:42 +0000 Subject: [PATCH 209/247] fix: E0277 and E0308 - RustlsAcceptor trait bounds and type mismatch in from_tcp_rustls_with_timeouts error[E0277]: the trait bound `RustlsAcceptor: Address` is not satisfied --> packages/axum-server/src/custom_axum_server.rs:49:81 | 49 | ... 
tls: RustlsConfig) -> Server { | ^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound error[E0308]: mismatched types --> packages/axum-server/src/custom_axum_server.rs:50:18 | 50 | add_timeouts(axum_server::from_tcp_rustls(socket, tls)) | ------------ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `Server`, found `Result, ...>` Changed return type to Result, std::io::Error> and used map to apply add_timeouts to the Result value. --- packages/axum-server/src/custom_axum_server.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/packages/axum-server/src/custom_axum_server.rs b/packages/axum-server/src/custom_axum_server.rs index fccaf54dc..39a2271d6 100644 --- a/packages/axum-server/src/custom_axum_server.rs +++ b/packages/axum-server/src/custom_axum_server.rs @@ -36,6 +36,8 @@ use tokio::sync::mpsc::{self, UnboundedReceiver, UnboundedSender}; use tokio::time::{Instant, Sleep}; use tower::Service; +type RustlsServerResult = Result, std::io::Error>; + const HTTP1_HEADER_READ_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(5); @@ -46,8 +48,8 @@ pub fn from_tcp_with_timeouts(socket: TcpListener) -> Server { } #[must_use] -pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> Server { - add_timeouts(axum_server::from_tcp_rustls(socket, tls)) +pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> RustlsServerResult { + axum_server::from_tcp_rustls(socket, tls).map(add_timeouts) } fn add_timeouts(mut server: Server) -> Server { From 74d5c8b9f0520077e8ec3baf84423808f771a285 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:39:14 +0000 Subject: [PATCH 210/247] fix: E0308 - mismatched types, from_tcp returns Result in from_tcp_with_timeouts error[E0308]: mismatched types --> packages/axum-server/src/custom_axum_server.rs:47:18 | 47 | 
add_timeouts(axum_server::from_tcp(socket)) | ------------ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `Server`, found `Result, Error>` Changed return type to Result, std::io::Error> and used map to apply add_timeouts to the Result value. --- packages/axum-server/src/custom_axum_server.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/axum-server/src/custom_axum_server.rs b/packages/axum-server/src/custom_axum_server.rs index 39a2271d6..b7f1d664e 100644 --- a/packages/axum-server/src/custom_axum_server.rs +++ b/packages/axum-server/src/custom_axum_server.rs @@ -37,14 +37,15 @@ use tokio::time::{Instant, Sleep}; use tower::Service; type RustlsServerResult = Result, std::io::Error>; +type ServerResult = Result, std::io::Error>; const HTTP1_HEADER_READ_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(5); #[must_use] -pub fn from_tcp_with_timeouts(socket: TcpListener) -> Server { - add_timeouts(axum_server::from_tcp(socket)) +pub fn from_tcp_with_timeouts(socket: TcpListener) -> ServerResult { + axum_server::from_tcp(socket).map(add_timeouts) } #[must_use] From cd83cfd9491cc9369921d560467d5cbfec917d02 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:39:45 +0000 Subject: [PATCH 211/247] fix: E0631 - type mismatch in add_timeouts function arguments for acceptor type error[E0631]: type mismatch in function arguments --> packages/axum-server/src/custom_axum_server.rs:53:51 | 53 | axum_server::from_tcp_rustls(socket, tls).map(add_timeouts) | --- ^^^^^^^^^^^^ expected due to this | = note: expected function signature `fn(Server<_, RustlsAcceptor>) -> _` found function signature `fn(Server<_, DefaultAcceptor>) -> _` Made add_timeouts generic over both Address and Acceptor types to work with both DefaultAcceptor and RustlsAcceptor. 
--- packages/axum-server/src/custom_axum_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/axum-server/src/custom_axum_server.rs b/packages/axum-server/src/custom_axum_server.rs index b7f1d664e..e3567bad4 100644 --- a/packages/axum-server/src/custom_axum_server.rs +++ b/packages/axum-server/src/custom_axum_server.rs @@ -53,7 +53,7 @@ pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> axum_server::from_tcp_rustls(socket, tls).map(add_timeouts) } -fn add_timeouts(mut server: Server) -> Server { +fn add_timeouts(mut server: Server) -> Server { server.http_builder().http1().timer(TokioTimer::new()); server.http_builder().http2().timer(TokioTimer::new()); From 612f7f1f07e69ce3e0fdeb3c8b264f46917aa06e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:40:10 +0000 Subject: [PATCH 212/247] fix: E0107 - missing generics for struct axum_server::Handle in signals.rs error[E0107]: missing generics for struct `axum_server::Handle` --> packages/axum-server/src/signals.rs:10:26 | 10 | handle: axum_server::Handle, | ^^^^^^ expected 1 generic argument Added SocketAddr generic parameter to Handle type in graceful_shutdown function signature. 
--- packages/axum-server/src/signals.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/axum-server/src/signals.rs b/packages/axum-server/src/signals.rs index 268ff79fa..360879e32 100644 --- a/packages/axum-server/src/signals.rs +++ b/packages/axum-server/src/signals.rs @@ -7,7 +7,7 @@ use tracing::instrument; #[instrument(skip(handle, rx_halt, message))] pub async fn graceful_shutdown( - handle: axum_server::Handle, + handle: axum_server::Handle, rx_halt: tokio::sync::oneshot::Receiver, message: String, address: SocketAddr, From 37793ce42e19ab922ad70948631d8f26ecad9213 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:41:05 +0000 Subject: [PATCH 213/247] fix: clippy::uninlined_format_args - variables can be used directly in format! string --> console/tracker-client/src/console/clients/udp/app.rs:178:24 | | __________________^ 179 | | ... "invalid address format: \`{}\`. Expected format is host:port", 180 | | ... tracker_socket_addr_str 181 | | ... )); | |_______^ --> console/tracker-client/src/console/clients/udp/app.rs:199:13 | 199 | ...rr(anyhow::anyhow!("DNS resolution failed for \`{}\`", tracker_socket_addr_str)) Changed format strings to use inline variable interpolation instead of positional arguments. 
--- console/tracker-client/src/console/clients/udp/app.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/console/tracker-client/src/console/clients/udp/app.rs b/console/tracker-client/src/console/clients/udp/app.rs index a2736c365..527f46e78 100644 --- a/console/tracker-client/src/console/clients/udp/app.rs +++ b/console/tracker-client/src/console/clients/udp/app.rs @@ -176,8 +176,7 @@ fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result anyhow::Result = resolved_addr.to_socket_addrs()?.collect(); if socket_addrs.is_empty() { - Err(anyhow::anyhow!("DNS resolution failed for `{}`", tracker_socket_addr_str)) + Err(anyhow::anyhow!("DNS resolution failed for `{tracker_socket_addr_str}`")) } else { Ok(socket_addrs[0]) } From f0678be9cf46a549ebd811800754f9b24b2dab7b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:41:49 +0000 Subject: [PATCH 214/247] fix: clippy::missing_errors_doc and clippy::double_must_use for from_tcp_with_timeouts error: docs for function returning `Result` missing `# Errors` section --> packages/axum-server/src/custom_axum_server.rs:47:1 | 47 | pub fn from_tcp_with_timeouts(socket: TcpListener) -> ServerResult { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error: this function has a `#[must_use]` attribute with no message, but returns a type already marked as `#[must_use]` --> packages/axum-server/src/custom_axum_server.rs:47:1 | 47 | pub fn from_tcp_with_timeouts(socket: TcpListener) -> ServerResult { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Added documentation with Errors section and removed #[must_use] attribute since Result type already has it. 
--- packages/axum-server/src/custom_axum_server.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/packages/axum-server/src/custom_axum_server.rs b/packages/axum-server/src/custom_axum_server.rs index e3567bad4..f332c9288 100644 --- a/packages/axum-server/src/custom_axum_server.rs +++ b/packages/axum-server/src/custom_axum_server.rs @@ -43,7 +43,11 @@ const HTTP1_HEADER_READ_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(5); -#[must_use] +/// Creates an Axum server from a TCP listener with configured timeouts. +/// +/// # Errors +/// +/// Returns an error if the server cannot be created from the TCP socket. pub fn from_tcp_with_timeouts(socket: TcpListener) -> ServerResult { axum_server::from_tcp(socket).map(add_timeouts) } From ea001980306165b863393cc3b0ef1f28c0b61cf9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:42:12 +0000 Subject: [PATCH 215/247] fix: clippy::missing_errors_doc and clippy::double_must_use for from_tcp_rustls_with_timeouts error: docs for function returning `Result` missing `# Errors` section --> packages/axum-server/src/custom_axum_server.rs:52:1 | 52 | pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> RustlsServerResult { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error: this function has a `#[must_use]` attribute with no message, but returns a type already marked as `#[must_use]` --> packages/axum-server/src/custom_axum_server.rs:52:1 | 52 | pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> RustlsServerResult { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Added documentation with Errors section and removed #[must_use] attribute since Result type already has it. 
--- packages/axum-server/src/custom_axum_server.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/packages/axum-server/src/custom_axum_server.rs b/packages/axum-server/src/custom_axum_server.rs index f332c9288..0328198ec 100644 --- a/packages/axum-server/src/custom_axum_server.rs +++ b/packages/axum-server/src/custom_axum_server.rs @@ -52,7 +52,11 @@ pub fn from_tcp_with_timeouts(socket: TcpListener) -> ServerResult { axum_server::from_tcp(socket).map(add_timeouts) } -#[must_use] +/// Creates an Axum server from a TCP listener with TLS and configured timeouts. +/// +/// # Errors +/// +/// Returns an error if the server cannot be created from the TCP socket or if TLS configuration fails. pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> RustlsServerResult { axum_server::from_tcp_rustls(socket, tls).map(add_timeouts) } From a217bb924a427e281f8df91b7f0a299627293a78 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:43:28 +0000 Subject: [PATCH 216/247] fix: E0599 - no method named handle found for Result in health-check-api-server error[E0599]: no method named `handle` found for enum `std::result::Result` in the current scope --> packages/axum-health-check-api-server/src/server.rs:120:10 | 119 | let running = axum_server::from_tcp(socket) | ___________________- 120 | | .handle(handle) | |_________-^^^^^^ Added expect() to unwrap Result before calling handle() method since from_tcp now returns Result. 
--- packages/axum-health-check-api-server/src/server.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/axum-health-check-api-server/src/server.rs b/packages/axum-health-check-api-server/src/server.rs index 3eeb1b054..c261f6af8 100644 --- a/packages/axum-health-check-api-server/src/server.rs +++ b/packages/axum-health-check-api-server/src/server.rs @@ -117,6 +117,7 @@ pub fn start( )); let running = axum_server::from_tcp(socket) + .expect("Failed to create server from TCP socket") .handle(handle) .serve(router.into_make_service_with_connect_info::()); From 054843477e3c73d132b7ca71dee208e3bec65dd8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:44:09 +0000 Subject: [PATCH 217/247] fix: E0599 and E0282 - no method named handle found for Result in axum-http-tracker-server error[E0599]: no method named `handle` found for enum `std::result::Result` in the current scope --> packages/axum-http-tracker-server/src/server.rs:77:22 | 76 | ... Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) | ____________________- 77 | | ... .handle(handle) | |___________-^^^^^^ error[E0599]: no method named `handle` found for enum `std::result::Result` in the current scope --> packages/axum-http-tracker-server/src/server.rs:85:22 | 84 | None => custom_axum_server::from_tcp_with_timeouts(socket) | _________________________- 85 | | .handle(handle) | |_____________________-^^^^^^ Added expect() calls to unwrap Result before calling handle() method for both TLS and non-TLS cases. 
--- packages/axum-http-tracker-server/src/server.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 2b43be0a9..4b7c15de8 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -74,6 +74,7 @@ impl Launcher { let running = Box::pin(async { match tls { Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) + .expect("Failed to create server from TCP socket with TLS") .handle(handle) // The TimeoutAcceptor is commented because TSL does not work with it. // See: https://github.com/torrust/torrust-index/issues/204#issuecomment-2115529214 @@ -82,6 +83,7 @@ impl Launcher { .await .expect("Axum server crashed."), None => custom_axum_server::from_tcp_with_timeouts(socket) + .expect("Failed to create server from TCP socket") .handle(handle) .acceptor(TimeoutAcceptor) .serve(app.into_make_service_with_connect_info::()) From 02e43394e2d30370ae8214b2d513c947773123b5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:45:06 +0000 Subject: [PATCH 218/247] fix: E0599 and E0282 - no method named handle found for Result in axum-rest-tracker-api-server error[E0599]: no method named `handle` found for enum `std::result::Result` in the current scope --> packages/axum-rest-tracker-api-server/src/server.rs:272:22 | 271 | ... Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) | ____________________- 272 | | ... .handle(handle) | |___________-^^^^^^ error[E0599]: no method named `handle` found for enum `std::result::Result` in the current scope --> packages/axum-rest-tracker-api-server/src/server.rs:280:22 | 279 | None => custom_axum_server::from_tcp_with_timeouts(socket) | _________________________- 280 | | .handle(handle) | |_____________________-^^^^^^ Added expect() calls to unwrap Result before calling handle() method for both TLS and non-TLS cases. 
--- packages/axum-rest-tracker-api-server/src/server.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/axum-rest-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs index b358345fb..a867ecfcf 100644 --- a/packages/axum-rest-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -269,6 +269,7 @@ impl Launcher { let running = Box::pin(async { match tls { Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) + .expect("Failed to create server from TCP socket with TLS") .handle(handle) // The TimeoutAcceptor is commented because TSL does not work with it. // See: https://github.com/torrust/torrust-index/issues/204#issuecomment-2115529214 @@ -277,6 +278,7 @@ impl Launcher { .await .expect("Axum server for tracker API crashed."), None => custom_axum_server::from_tcp_with_timeouts(socket) + .expect("Failed to create server from TCP socket") .handle(handle) .acceptor(TimeoutAcceptor) .serve(router.into_make_service_with_connect_info::()) From a62eb146fae7de0edb671c65e99d108ca18db2e5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:51:04 +0000 Subject: [PATCH 219/247] fix: runtime panic - Registering a blocking socket with tokio runtime is unsupported thread 'tokio-runtime-worker' panicked at /home/josecelano/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/axum-server-0.8.0/src/server.rs:70:30: Registering a blocking socket with the tokio runtime is unsupported. If you wish to do anyways, please add `--cfg tokio_allow_from_blocking_fd` to your RUSTFLAGS. See github.com/tokio-rs/tokio/issues/7172 for details. Set std::net::TcpListener instances to non-blocking mode using set_nonblocking(true) before passing them to axum-server to avoid runtime panics when registering with tokio runtime. 
This is required since axum-server 0.8.0 and tokio v1.44.0 which added debug assertions to prevent blocking sockets from being registered with the tokio runtime. --- packages/axum-health-check-api-server/src/server.rs | 3 +++ packages/axum-http-tracker-server/src/server.rs | 3 +++ packages/axum-rest-tracker-api-server/src/server.rs | 1 + 3 files changed, 7 insertions(+) diff --git a/packages/axum-health-check-api-server/src/server.rs b/packages/axum-health-check-api-server/src/server.rs index c261f6af8..a371f146e 100644 --- a/packages/axum-health-check-api-server/src/server.rs +++ b/packages/axum-health-check-api-server/src/server.rs @@ -101,6 +101,9 @@ pub fn start( .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)); let socket = std::net::TcpListener::bind(bind_to).expect("Could not bind tcp_listener to address."); + socket + .set_nonblocking(true) + .expect("Failed to set socket to non-blocking mode"); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); let protocol = Protocol::HTTP; // The health check API only supports HTTP directly now. Use a reverse proxy for HTTPS. 
let service_binding = ServiceBinding::new(protocol.clone(), address).expect("Service binding creation failed"); diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 4b7c15de8..69f9cb72e 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -52,6 +52,9 @@ impl Launcher { rx_halt: Receiver, ) -> BoxFuture<'static, ()> { let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); + socket + .set_nonblocking(true) + .expect("Failed to set socket to non-blocking mode"); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); let handle = Handle::new(); diff --git a/packages/axum-rest-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs index a867ecfcf..32c1051e1 100644 --- a/packages/axum-rest-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -247,6 +247,7 @@ impl Launcher { rx_halt: Receiver, ) -> BoxFuture<'static, ()> { let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); + socket.set_nonblocking(true).expect("Failed to set socket to non-blocking mode"); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); let router = router(http_api_container, access_tokens, address); From eccab24403fb95be99f3be3bd05875d2e2ac1916 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 09:07:47 +0000 Subject: [PATCH 220/247] style: apply cargo fmt formatting to axum-rest-tracker-api-server Format set_nonblocking call to use multi-line formatting per rustfmt conventions. 
--- packages/axum-rest-tracker-api-server/src/server.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/axum-rest-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs index 32c1051e1..05adeae8a 100644 --- a/packages/axum-rest-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -247,7 +247,9 @@ impl Launcher { rx_halt: Receiver, ) -> BoxFuture<'static, ()> { let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); - socket.set_nonblocking(true).expect("Failed to set socket to non-blocking mode"); + socket + .set_nonblocking(true) + .expect("Failed to set socket to non-blocking mode"); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); let router = router(http_api_container, access_tokens, address); From 38ed4cbc074c7322ba6b898bf424ba935bb419e3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 16:38:41 +0000 Subject: [PATCH 221/247] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 4 packages to latest compatible versions Updating derive_more v2.1.0 -> v2.1.1 Updating derive_more-impl v2.1.0 -> v2.1.1 Updating reqwest v0.12.26 -> v0.12.27 Updating serde_json v1.0.145 -> v1.0.146 note: pass `--verbose` to see 7 unchanged dependencies behind latest ``` --- Cargo.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da0910f48..3bdf93e00 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1542,18 +1542,18 @@ dependencies = [ [[package]] name = "derive_more" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10b768e943bed7bf2cab53df09f4bc34bfd217cdb57d971e769874c9a6710618" +checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134" dependencies = [ "derive_more-impl", ] [[package]] name = 
"derive_more-impl" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d286bfdaf75e988b4a78e013ecd79c581e06399ab53fbacd2d916c2f904f30b" +checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" dependencies = [ "convert_case", "proc-macro2", @@ -3819,9 +3819,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.26" +version = "0.12.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b4c14b2d9afca6a60277086b0cc6a6ae0b568f6f7916c943a8cdc79f8be240f" +checksum = "8e893f6bece5953520ddbb3f8f46f3ef36dd1fef4ee9b087c4b4a725fd5d10e4" dependencies = [ "base64 0.22.1", "bytes", @@ -4278,9 +4278,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.145" +version = "1.0.146" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +checksum = "217ca874ae0207aac254aa02c957ded05585a90892cc8d87f9e5fa49669dadd8" dependencies = [ "indexmap 2.12.1", "itoa", From 767bb5c2ec9e3042ad28d0d36c7a8e1071385889 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 23 Dec 2025 08:58:13 +0000 Subject: [PATCH 222/247] fix: [#1628] upgrade to Debian 13 (Trixie) to resolve security vulnerabilities - Update base images from Debian 12 (bookworm) to Debian 13 (trixie) - Update builder: rust:bookworm -> rust:trixie - Update tester: rust:slim-bookworm -> rust:slim-trixie - Update GCC: gcc:bookworm -> gcc:trixie - Update runtime: gcr.io/distroless/cc-debian12:debug -> gcr.io/distroless/cc-debian13:debug This resolves all 5 security vulnerabilities (1 CRITICAL, 4 HIGH): - CVE-2019-1010022 (CRITICAL): glibc stack guard protection bypass - CVE-2018-20796 (HIGH): glibc uncontrolled recursion - CVE-2019-1010023 (HIGH): glibc ldd malicious ELF code execution - CVE-2019-9192 (HIGH): glibc uncontrolled recursion - CVE-2023-0286 (HIGH): OpenSSL X.400 address type confusion Trivy scan 
results: - Before: Total 5 (CRITICAL: 1, HIGH: 4) - After: Total 0 (CRITICAL: 0, HIGH: 0) Container tested and verified working with health checks passing. --- Containerfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Containerfile b/Containerfile index 263053390..e926a5202 100644 --- a/Containerfile +++ b/Containerfile @@ -3,13 +3,13 @@ # Torrust Tracker ## Builder Image -FROM docker.io/library/rust:bookworm AS chef +FROM docker.io/library/rust:trixie AS chef WORKDIR /tmp RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash RUN cargo binstall --no-confirm cargo-chef cargo-nextest ## Tester Image -FROM docker.io/library/rust:slim-bookworm AS tester +FROM docker.io/library/rust:slim-trixie AS tester WORKDIR /tmp RUN apt-get update; apt-get install -y curl sqlite3; apt-get autoclean @@ -21,7 +21,7 @@ RUN mkdir -p /app/share/torrust/default/database/; \ sqlite3 /app/share/torrust/default/database/tracker.sqlite3.db "VACUUM;" ## Su Exe Compile -FROM docker.io/library/gcc:bookworm AS gcc +FROM docker.io/library/gcc:trixie AS gcc COPY ./contrib/dev-tools/su-exec/ /usr/local/src/su-exec/ RUN cc -Wall -Werror -g /usr/local/src/su-exec/su-exec.c -o /usr/local/bin/su-exec; chmod +x /usr/local/bin/su-exec @@ -91,7 +91,7 @@ RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin ## Runtime -FROM gcr.io/distroless/cc-debian12:debug AS runtime +FROM gcr.io/distroless/cc-debian13:debug AS runtime RUN ["/busybox/cp", "-sp", "/busybox/sh","/busybox/cat","/busybox/ls","/busybox/env", "/bin/"] COPY --from=gcc --chmod=0555 /usr/local/bin/su-exec /bin/su-exec From 300be03c24aa769d55b8415d310c2e032cff59ce Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 23 Dec 2025 09:50:17 +0000 Subject: [PATCH 223/247] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 2 packages to latest compatible versions Updating 
reqwest v0.12.27 -> v0.12.28 Updating rustix v1.1.2 -> v1.1.3 note: pass `--verbose` to see 7 unchanged dependencies behind latest ``` --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3bdf93e00..d0478573b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3819,9 +3819,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.27" +version = "0.12.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e893f6bece5953520ddbb3f8f46f3ef36dd1fef4ee9b087c4b4a725fd5d10e4" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ "base64 0.22.1", "bytes", @@ -4023,9 +4023,9 @@ dependencies = [ [[package]] name = "rustix" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ "bitflags", "errno", From c9c027dfe96fbb4b5558f6519cb936d9ff9f5f1d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 23 Dec 2025 09:58:13 +0000 Subject: [PATCH 224/247] chore(deps): bump actions/upload-artifact from 5 to 6 Bumps actions/upload-artifact from 5 to 6. This update includes: - Node.js 24 runtime support - Requires Actions Runner version 2.327.1 or later - Fixes punycode deprecation warnings --- .github/workflows/generate_coverage_pr.yaml | 6 +++--- cSpell.json | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/generate_coverage_pr.yaml b/.github/workflows/generate_coverage_pr.yaml index 6942e276f..f762207cf 100644 --- a/.github/workflows/generate_coverage_pr.yaml +++ b/.github/workflows/generate_coverage_pr.yaml @@ -59,13 +59,13 @@ jobs: # Triggered sub-workflow is not able to detect the original commit/PR which is available # in this workflow. 
- name: Store PR number - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: pr_number path: pr_number.txt - name: Store commit SHA - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: commit_sha path: commit_sha.txt @@ -74,7 +74,7 @@ jobs: # is executed by a different workflow `upload_coverage.yml`. The reason for this # split is because `on.pull_request` workflows don't have access to secrets. - name: Store coverage report in artifacts - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: codecov_report path: ./codecov.json diff --git a/cSpell.json b/cSpell.json index 76939c199..81421e050 100644 --- a/cSpell.json +++ b/cSpell.json @@ -32,6 +32,7 @@ "canonicalized", "certbot", "chrono", + "Cinstrument", "ciphertext", "clippy", "cloneable", From 8dde9c3e1b217ebd974670f5d309c590e6dba105 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 Jan 2026 12:41:33 +0000 Subject: [PATCH 225/247] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 109 packages to latest compatible versions Updating arc-swap v1.7.1 -> v1.8.0 Updating async-compression v0.4.36 -> v0.4.37 Adding aws-lc-rs v1.15.4 Adding aws-lc-sys v0.37.0 Updating axum-core v0.5.5 -> v0.5.6 Updating axum-extra v0.12.3 -> v0.12.5 Updating bigdecimal v0.4.9 -> v0.4.10 Updating cc v1.2.50 -> v1.2.54 Adding cesu8 v1.1.0 Updating chrono v0.4.42 -> v0.4.43 Updating clap v4.5.53 -> v4.5.54 Updating clap_builder v4.5.53 -> v4.5.54 Updating clap_lex v0.7.6 -> v0.7.7 Adding combine v4.6.7 Updating compression-codecs v0.4.35 -> v0.4.36 Adding dunce v1.0.5 Updating ferroid v0.8.8 -> v0.8.9 Updating filetime v0.2.26 -> v0.2.27 Updating find-msvc-tools v0.1.5 -> v0.1.8 Updating flate2 v1.1.5 -> v1.1.8 Adding foldhash v0.2.0 Updating fs-err v3.2.1 -> v3.2.2 Adding fs_extra v1.3.0 Updating getrandom v0.2.16 -> v0.2.17 Updating h2 v0.4.12 -> v0.4.13 Updating hashlink v0.10.0 -> v0.11.0 Removing hyper-tls 
v0.6.0 Updating indexmap v2.12.1 -> v2.13.0 Updating iri-string v0.7.9 -> v0.7.10 Updating itoa v1.0.16 -> v1.0.17 Adding jni v0.21.1 Adding jni-sys v0.3.0 Updating js-sys v0.3.83 -> v0.3.85 Updating libc v0.2.178 -> v0.2.180 Updating libm v0.2.15 -> v0.2.16 Updating libredox v0.1.11 -> v0.1.12 Updating libsqlite3-sys v0.35.0 -> v0.36.0 Updating local-ip-address v0.6.8 -> v0.6.9 Adding lru-slab v0.1.2 Updating num-conv v0.1.0 -> v0.2.0 Adding openssl-probe v0.2.1 Updating portable-atomic v1.12.0 -> v1.13.0 Updating proc-macro2 v1.0.103 -> v1.0.106 Updating prost v0.14.1 -> v0.14.3 Updating prost-derive v0.14.1 -> v0.14.3 Updating prost-types v0.14.1 -> v0.14.3 Adding quinn v0.11.9 Adding quinn-proto v0.11.13 Adding quinn-udp v0.5.14 Updating quote v1.0.42 -> v1.0.44 Updating r2d2_sqlite v0.31.0 -> v0.32.0 Updating rand_core v0.9.3 -> v0.9.5 Updating redox_syscall v0.6.0 -> v0.7.0 Updating reqwest v0.12.28 -> v0.13.1 Updating rkyv v0.7.45 -> v0.7.46 Updating rkyv_derive v0.7.45 -> v0.7.46 Adding rsqlite-vfs v0.1.0 Updating rusqlite v0.37.0 -> v0.38.0 Updating rust_decimal v1.39.0 -> v1.40.0 Updating rustc-demangle v0.1.26 -> v0.1.27 Updating rustls v0.23.35 -> v0.23.36 Updating rustls-native-certs v0.8.2 -> v0.8.3 Updating rustls-pki-types v1.13.2 -> v1.14.0 Adding rustls-platform-verifier v0.6.2 Adding rustls-platform-verifier-android v0.1.1 Updating rustls-webpki v0.103.8 -> v0.103.9 Updating ryu v1.0.21 -> v1.0.22 Updating schemars v1.1.0 -> v1.2.0 Updating serde_json v1.0.146 -> v1.0.149 Updating signal-hook-registry v1.4.7 -> v1.4.8 Updating socket2 v0.6.1 -> v0.6.2 Adding sqlite-wasm-rs v0.5.2 Updating subprocess v0.2.9 -> v0.2.13 Updating syn v2.0.111 -> v2.0.114 Updating tempfile v3.23.0 -> v3.24.0 Updating testcontainers v0.26.2 -> v0.26.3 Updating thiserror v2.0.17 -> v2.0.18 Updating thiserror-impl v2.0.17 -> v2.0.18 Updating time v0.3.44 -> v0.3.46 Updating time-core v0.1.6 -> v0.1.8 Updating time-macros v0.2.24 -> v0.2.26 Updating tokio v1.48.0 -> 
v1.49.0 Removing tokio-native-tls v0.3.1 Updating tokio-stream v0.1.17 -> v0.1.18 Updating tokio-util v0.7.17 -> v0.7.18 Updating toml v0.9.10+spec-1.1.0 -> v0.9.11+spec-1.1.0 Updating tower v0.5.2 -> v0.5.3 Updating url v2.5.7 -> v2.5.8 Updating uuid v1.19.0 -> v1.20.0 Updating wasip2 v1.0.1+wasi-0.2.4 -> v1.0.2+wasi-0.2.9 Updating wasm-bindgen v0.2.106 -> v0.2.108 Updating wasm-bindgen-futures v0.4.56 -> v0.4.58 Updating wasm-bindgen-macro v0.2.106 -> v0.2.108 Updating wasm-bindgen-macro-support v0.2.106 -> v0.2.108 Updating wasm-bindgen-shared v0.2.106 -> v0.2.108 Updating web-sys v0.3.83 -> v0.3.85 Adding webpki-root-certs v1.0.5 Updating webpki-roots v1.0.4 -> v1.0.5 Adding windows-sys v0.45.0 Adding windows-targets v0.42.2 Adding windows_aarch64_gnullvm v0.42.2 Adding windows_aarch64_msvc v0.42.2 Adding windows_i686_gnu v0.42.2 Adding windows_i686_msvc v0.42.2 Adding windows_x86_64_gnu v0.42.2 Adding windows_x86_64_gnullvm v0.42.2 Adding windows_x86_64_msvc v0.42.2 Updating wit-bindgen v0.46.0 -> v0.51.0 Updating zerocopy v0.8.31 -> v0.8.34 Updating zerocopy-derive v0.8.31 -> v0.8.34 Adding zmij v1.0.17 note: pass `--verbose` to see 7 unchanged dependencies behind latest ``` --- Cargo.lock | 850 +++++++++++++++++++++++++++++++++++------------------ 1 file changed, 557 insertions(+), 293 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d0478573b..146da3a18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,7 +23,7 @@ version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", "once_cell", "version_check", ] @@ -175,9 +175,12 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +checksum = 
"51d03449bb8ca2cc2ef70869af31463d1ae5ccc8fa3e334b307203fbf815207e" +dependencies = [ + "rustversion", +] [[package]] name = "arrayvec" @@ -236,13 +239,12 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.36" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98ec5f6c2f8bc326c994cb9e241cc257ddaba9afa8555a43cffbb5dd86efaa37" +checksum = "d10e4f991a553474232bc0a31799f6d24b034a84c0971d80d2e2f78b2e576e40" dependencies = [ "compression-codecs", "compression-core", - "futures-core", "pin-project-lite", "tokio", ] @@ -352,7 +354,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -369,7 +371,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -393,6 +395,28 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "aws-lc-rs" +version = "1.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b7b6141e96a8c160799cc2d5adecd5cbbe5054cb8c7c4af53da0f83bb7ad256" +dependencies = [ + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c34dda4df7017c8db52132f0f8a2e0f8161649d15723ed63fc00c82d0f2081a" +dependencies = [ + "cc", + "cmake", + "dunce", + "fs_extra", +] + [[package]] name = "axum" version = "0.8.8" @@ -440,9 +464,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22" +checksum = 
"08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" dependencies = [ "bytes", "futures-core", @@ -459,9 +483,9 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.12.3" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dfbd6109d91702d55fc56df06aae7ed85c465a7a451db6c0e54a4b9ca5983d1" +checksum = "fef252edff26ddba56bbcdf2ee3307b8129acb86f5749b68990c168a6fcc9c76" dependencies = [ "axum", "axum-core", @@ -490,7 +514,7 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -553,9 +577,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bigdecimal" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "560f42649de9fa436b73517378a147ec21f6c997a546581df4b4b31677828934" +checksum = "4d6867f1565b3aad85681f1015055b087fcfd840d6aeee6eee7f2da317603695" dependencies = [ "autocfg", "libm", @@ -585,7 +609,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -614,7 +638,7 @@ dependencies = [ "mockall", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-util", "torrust-tracker-clock", @@ -639,7 +663,7 @@ dependencies = [ "percent-encoding", "serde", "serde_bencode", - "thiserror 2.0.17", + "thiserror 2.0.18", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-contrib-bencode", @@ -675,7 +699,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_repr", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "torrust-tracker-configuration", "torrust-tracker-located-error", @@ -701,7 +725,7 @@ dependencies = [ "serde", "serde_json", "testcontainers", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-util", "torrust-rest-tracker-api-client", @@ -734,7 +758,7 @@ 
dependencies = [ "mockall", "rand 0.9.2", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-util", "torrust-tracker-clock", @@ -847,7 +871,7 @@ dependencies = [ "serde_json", "serde_repr", "serde_urlencoded", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tokio-util", @@ -907,7 +931,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1018,9 +1042,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.50" +version = "1.2.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f50d563227a1c37cc0a263f64eca3334388c01c5e4c4861a9def205c614383c" +checksum = "6354c81bbfd62d9cfa9cb3c773c2b7b2a3a482d569de977fd0e961f6e7c00583" dependencies = [ "find-msvc-tools", "jobserver", @@ -1028,6 +1052,12 @@ dependencies = [ "shlex", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cexpr" version = "0.6.0" @@ -1051,9 +1081,9 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" dependencies = [ "iana-time-zone", "num-traits", @@ -1111,9 +1141,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.53" +version = "4.5.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" +checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" dependencies = [ "clap_builder", "clap_derive", @@ -1121,9 +1151,9 @@ dependencies = [ [[package]] name = "clap_builder" 
-version = "4.5.53" +version = "4.5.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" +checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" dependencies = [ "anstream", "anstyle", @@ -1140,14 +1170,14 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "clap_lex" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" +checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" [[package]] name = "cmake" @@ -1164,6 +1194,16 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + [[package]] name = "compact_str" version = "0.7.1" @@ -1179,9 +1219,9 @@ dependencies = [ [[package]] name = "compression-codecs" -version = "0.4.35" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0f7ac3e5b97fdce45e8922fb05cae2c37f7bbd63d30dd94821dacfd8f3f2bf2" +checksum = "00828ba6fd27b45a448e57dbfe84f1029d4c9f26b368157e9a448a5f49a2ec2a" dependencies = [ "brotli", "compression-core", @@ -1446,7 +1486,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1460,7 +1500,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1471,7 +1511,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 
2.0.111", + "syn 2.0.114", ] [[package]] @@ -1482,7 +1522,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1527,7 +1567,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1537,7 +1577,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1559,7 +1599,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.111", + "syn 2.0.114", "unicode-xid", ] @@ -1571,7 +1611,7 @@ checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1598,7 +1638,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1618,6 +1658,12 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "dyn-clone" version = "1.0.20" @@ -1722,9 +1768,9 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "ferroid" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce161062fb044bd629c2393590efd47cab8d0241faf15704ffb0d47b7b4e4a35" +checksum = "bb330bbd4cb7a5b9f559427f06f98a4f853a137c8298f3bd3f8ca57663e21986" dependencies = [ "portable-atomic", "rand 0.9.2", 
@@ -1749,27 +1795,26 @@ dependencies = [ [[package]] name = "filetime" -version = "0.2.26" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" +checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" dependencies = [ "cfg-if", "libc", "libredox", - "windows-sys 0.60.2", ] [[package]] name = "find-msvc-tools" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" +checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" [[package]] name = "flate2" -version = "1.1.5" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" dependencies = [ "crc32fast", "libz-sys", @@ -1788,6 +1833,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" +[[package]] +name = "foldhash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" + [[package]] name = "foreign-types" version = "0.3.2" @@ -1867,7 +1918,7 @@ checksum = "a0b4095fc99e1d858e5b8c7125d2638372ec85aa0fe6c807105cf10b0265ca6c" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1879,7 +1930,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1891,19 +1942,25 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "fs-err" -version = "3.2.1" +version = "3.2.2" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824f08d01d0f496b3eca4f001a13cf17690a6ee930043d20817f547455fd98f8" +checksum = "baf68cef89750956493a66a10f512b9e58d9db21f2a573c079c0bdf1207a54a7" dependencies = [ "autocfg", "tokio", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "funty" version = "2.0.0" @@ -1979,7 +2036,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2030,13 +2087,15 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi", + "wasm-bindgen", ] [[package]] @@ -2046,9 +2105,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", + "js-sys", "libc", "r-efi", "wasip2", + "wasm-bindgen", ] [[package]] @@ -2060,7 +2121,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2089,9 +2150,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" dependencies = [ "atomic-waker", "bytes", @@ -2099,7 +2160,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.12.1", + "indexmap 2.13.0", 
"slab", "tokio", "tokio-util", @@ -2114,7 +2175,7 @@ checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", - "zerocopy 0.8.31", + "zerocopy 0.8.34", ] [[package]] @@ -2140,7 +2201,7 @@ checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "allocator-api2", "equivalent", - "foldhash", + "foldhash 0.1.5", ] [[package]] @@ -2148,14 +2209,17 @@ name = "hashbrown" version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +dependencies = [ + "foldhash 0.2.0", +] [[package]] name = "hashlink" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" +checksum = "ea0b22561a9c04a7cb1a302c013e0259cd3b4bb619f145b32f72b8b4bcbed230" dependencies = [ - "hashbrown 0.15.5", + "hashbrown 0.16.1", ] [[package]] @@ -2303,22 +2367,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "hyper-tls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" -dependencies = [ - "bytes", - "http-body-util", - "hyper", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - [[package]] name = "hyper-util" version = "0.1.19" @@ -2337,7 +2385,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.6.1", + "socket2 0.6.2", "system-configuration", "tokio", "tower-service", @@ -2505,9 +2553,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.1" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" 
dependencies = [ "equivalent", "hashbrown 0.16.1", @@ -2547,9 +2595,9 @@ checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "iri-string" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" dependencies = [ "memchr", "serde", @@ -2607,9 +2655,31 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.16" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee5b5339afb4c41626dde77b7a611bd4f2c202b897852b4bcf5d03eddc61010" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "jobserver" @@ -2623,9 +2693,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.83" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" dependencies = [ "once_cell", "wasm-bindgen", @@ -2648,9 +2718,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.178" +version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" +checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" [[package]] name = "libloading" @@ -2664,26 +2734,26 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "libredox" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df15f6eac291ed1cf25865b1ee60399f57e7c227e7f51bdbd4c5270396a9ed50" +checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" dependencies = [ "bitflags", "libc", - "redox_syscall 0.6.0", + "redox_syscall 0.7.0", ] [[package]] name = "libsqlite3-sys" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f" +checksum = "95b4103cffefa72eb8428cb6b47d6627161e51c2739fc5e3b734584157bc642a" dependencies = [ "cc", "pkg-config", @@ -2715,13 +2785,12 @@ checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "local-ip-address" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a60bf300a990b2d1ebdde4228e873e8e4da40d834adbf5265f3da1457ede652" +checksum = "92488bc8a0f99ee9f23577bdd06526d49657df8bd70504c61f812337cdad01ab" dependencies = [ "libc", "neli", - "thiserror 2.0.17", "windows-sys 0.61.2", ] @@ -2752,6 +2821,12 @@ dependencies = [ "hashbrown 0.15.5", ] +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + [[package]] name = "matchit" 
version = "0.8.4" @@ -2791,7 +2866,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2850,7 +2925,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2900,7 +2975,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "termcolor", "thiserror 1.0.69", ] @@ -2962,7 +3037,7 @@ dependencies = [ "libc", "log", "openssl", - "openssl-probe", + "openssl-probe 0.1.6", "openssl-sys", "schannel", "security-framework 2.11.1", @@ -2996,7 +3071,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3059,9 +3134,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" [[package]] name = "num-integer" @@ -3153,7 +3228,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3162,6 +3237,12 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-probe" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" + [[package]] name = "openssl-sys" version = "0.9.111" @@ -3241,7 +3322,7 @@ dependencies = [ "regex", "regex-syntax", "structmeta", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3264,7 +3345,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.111", + "syn 
2.0.114", ] [[package]] @@ -3338,7 +3419,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3414,9 +3495,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f59e70c4aef1e55797c2e8fd94a4f2a973fc972cfde0e0b05f683667b0cd39dd" +checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950" [[package]] name = "portable-atomic-util" @@ -3448,7 +3529,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.31", + "zerocopy 0.8.34", ] [[package]] @@ -3515,14 +3596,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "proc-macro2" -version = "1.0.103" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] @@ -3535,16 +3616,16 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "version_check", "yansi", ] [[package]] name = "prost" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" dependencies = [ "bytes", "prost-derive", @@ -3552,22 +3633,22 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.14.1" +version = "0.14.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "prost-types" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" +checksum = "8991c4cbdb8bc5b11f0b074ffe286c30e523de90fee5ba8132f1399f23cb3dd7" dependencies = [ "prost", ] @@ -3603,11 +3684,67 @@ dependencies = [ "rand 0.8.5", ] +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2 0.6.2", + "thiserror 2.0.18", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "aws-lc-rs", + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.18", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.6.2", + "tracing", + "windows-sys 0.60.2", +] + [[package]] name = "quote" -version = "1.0.42" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ "proc-macro2", ] @@ -3641,9 +3778,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.31.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63417e83dc891797eea3ad379f52a5986da4bca0d6ef28baf4d14034dd111b0c" +checksum = "a2ebd03c29250cdf191da93a35118b4567c2ef0eacab54f65e058d6f4c9965f6" dependencies = [ "r2d2", "rusqlite", @@ -3674,7 +3811,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -3694,7 +3831,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -3703,14 +3840,14 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", ] [[package]] name = "rand_core" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" dependencies = [ "getrandom 0.3.4", ] @@ -3746,9 +3883,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec96166dafa0886eb81fe1c0a388bece180fbef2135f97c1e2cf8302e74b43b5" +checksum = "49f3fe0889e69e2ae9e41f4d6c4c0181701d00e4697b356fb1f74173a5e0ee27" 
dependencies = [ "bitflags", ] @@ -3770,7 +3907,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3819,9 +3956,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.28" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" +checksum = "04e9018c9d814e5f30cc16a0f03271aeab3571e609612d9fe78c1aa8d11c2f62" dependencies = [ "base64 0.22.1", "bytes", @@ -3833,21 +3970,21 @@ dependencies = [ "http-body-util", "hyper", "hyper-rustls", - "hyper-tls", "hyper-util", "js-sys", "log", "mime", - "native-tls", "percent-encoding", "pin-project-lite", + "quinn", + "rustls", "rustls-pki-types", + "rustls-platform-verifier", "serde", "serde_json", - "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-native-tls", + "tokio-rustls", "tower", "tower-http", "tower-service", @@ -3865,7 +4002,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.16", + "getrandom 0.2.17", "libc", "untrusted", "windows-sys 0.52.0", @@ -3884,9 +4021,9 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.45" +version = "0.7.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" +checksum = "2297bf9c81a3f0dc96bc9521370b88f054168c29826a75e89c55ff196e7ed6a1" dependencies = [ "bitvec", "bytecheck", @@ -3902,15 +4039,25 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.45" +version = "0.7.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" +checksum = "84d7b42d4b8d06048d3ac8db0eb31bcb942cbeb709f0b5f2b2ebde398d3038f5" dependencies = [ "proc-macro2", "quote", "syn 1.0.109", ] 
+[[package]] +name = "rsqlite-vfs" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a1f2315036ef6b1fbacd1972e8ee7688030b0a2121edfc2a6550febd41574d" +dependencies = [ + "hashbrown 0.16.1", + "thiserror 2.0.18", +] + [[package]] name = "rstest" version = "0.25.0" @@ -3948,7 +4095,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.111", + "syn 2.0.114", "unicode-ident", ] @@ -3966,15 +4113,15 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.111", + "syn 2.0.114", "unicode-ident", ] [[package]] name = "rusqlite" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f" +checksum = "f1c93dd1c9683b438c392c492109cb702b8090b2bfc8fed6f6e4eb4523f17af3" dependencies = [ "bitflags", "fallible-iterator", @@ -3982,13 +4129,14 @@ dependencies = [ "hashlink", "libsqlite3-sys", "smallvec", + "sqlite-wasm-rs", ] [[package]] name = "rust_decimal" -version = "1.39.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35affe401787a9bd846712274d97654355d21b2a2c092a3139aabe31e9022282" +checksum = "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" dependencies = [ "arrayvec", "borsh", @@ -4002,9 +4150,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" +checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" [[package]] name = "rustc-hash" @@ -4036,10 +4184,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.35" +version = "0.23.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" 
+checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ + "aws-lc-rs", "log", "once_cell", "ring", @@ -4051,11 +4200,11 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe", + "openssl-probe 0.2.1", "rustls-pki-types", "schannel", "security-framework 3.5.1", @@ -4072,19 +4221,48 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.2" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ + "web-time", "zeroize", ] +[[package]] +name = "rustls-platform-verifier" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" +dependencies = [ + "core-foundation 0.10.1", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework 3.5.1", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + [[package]] name = "rustls-webpki" -version = "0.103.8" +version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +checksum = 
"d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -4098,9 +4276,9 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62049b2877bf12821e8f9ad256ee38fdc31db7387ec2d3b3f403024de2034aea" +checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" [[package]] name = "same-file" @@ -4149,9 +4327,9 @@ dependencies = [ [[package]] name = "schemars" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289" +checksum = "54e910108742c57a770f492731f99be216a52fadd361b06c8fb59d74ccc267d2" dependencies = [ "dyn-clone", "ref-cast", @@ -4260,7 +4438,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4270,7 +4448,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2f2d7ff8a2140333718bb329f5c40fc5f0865b84c426183ce14c97d2ab8154f" dependencies = [ "form_urlencoded", - "indexmap 2.12.1", + "indexmap 2.13.0", "itoa", "ryu", "serde_core", @@ -4278,16 +4456,16 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.146" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "217ca874ae0207aac254aa02c957ded05585a90892cc8d87f9e5fa49669dadd8" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "itoa", "memchr", - "ryu", "serde", "serde_core", + "zmij", ] [[package]] @@ -4309,7 +4487,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4352,9 +4530,9 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.12.1", + "indexmap 2.13.0", "schemars 0.9.0", - "schemars 1.1.0", + "schemars 1.2.0", "serde_core", "serde_json", "serde_with_macros", @@ -4370,7 +4548,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4412,10 +4590,11 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.7" +version = "1.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" dependencies = [ + "errno", "libc", ] @@ -4461,14 +4640,26 @@ dependencies = [ [[package]] name = "socket2" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +checksum = "86f4aa3ad99f2088c990dfa82d367e19cb29268ed67c574d10d0a4bfe71f07e0" dependencies = [ "libc", "windows-sys 0.60.2", ] +[[package]] +name = "sqlite-wasm-rs" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f4206ed3a67690b9c29b77d728f6acc3ce78f16bf846d83c94f76400320181b" +dependencies = [ + "cc", + "js-sys", + "rsqlite-vfs", + "wasm-bindgen", +] + [[package]] name = "stable_deref_trait" version = "1.2.1" @@ -4496,7 +4687,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4507,14 +4698,14 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "subprocess" -version = "0.2.9" +version = "0.2.13" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2e86926081dda636c546d8c5e641661049d7562a68f5488be4a1f7f66f6086" +checksum = "f75238edb5be30a9ea3035b945eb9c319dde80e879411cdc9a8978e1ac822960" dependencies = [ "libc", "winapi", @@ -4560,9 +4751,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.111" +version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", @@ -4586,7 +4777,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4635,9 +4826,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.23.0" +version = "3.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" +checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" dependencies = [ "fastrand", "getrandom 0.3.4", @@ -4673,9 +4864,9 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "testcontainers" -version = "0.26.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1483605f58b2fff80d786eb56a0b6b4e8b1e5423fbc9ec2e3e562fa2040d6f27" +checksum = "a81ec0158db5fbb9831e09d1813fe5ea9023a2b5e6e8e0a5fe67e2a820733629" dependencies = [ "astral-tokio-tar", "async-trait", @@ -4694,7 +4885,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tokio-util", @@ -4722,11 +4913,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "thiserror-impl 2.0.17", + "thiserror-impl 2.0.18", ] [[package]] @@ -4737,18 +4928,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "thiserror-impl" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4762,30 +4953,30 @@ dependencies = [ [[package]] name = "time" -version = "0.3.44" +version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +checksum = "9da98b7d9b7dad93488a84b8248efc35352b0b2657397d4167e7ad67e5d535e5" dependencies = [ "deranged", "itoa", "num-conv", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.24" +version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +checksum = "78cc610bac2dcee56805c99642447d4c5dbde4d01f752ffea0199aee1f601dc4" dependencies = [ "num-conv", "time-core", @@ -4828,16 +5019,16 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = 
"tokio" -version = "1.48.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" dependencies = [ "bytes", "libc", "mio", "pin-project-lite", "signal-hook-registry", - "socket2 0.6.1", + "socket2 0.6.2", "tokio-macros", "windows-sys 0.61.2", ] @@ -4850,17 +5041,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", + "syn 2.0.114", ] [[package]] @@ -4875,9 +5056,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" dependencies = [ "futures-core", "pin-project-lite", @@ -4886,9 +5067,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.17" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" dependencies = [ "bytes", "futures-core", @@ -4911,11 +5092,11 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.10+spec-1.1.0" +version = "0.9.11+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0825052159284a1a8b4d6c0c86cbc801f2da5afd2b225fa548c72f2e74002f48" +checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" 
dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "serde_core", "serde_spanned 1.0.4", "toml_datetime 0.7.5+spec-1.1.0", @@ -4948,7 +5129,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "serde", "serde_spanned 0.6.9", "toml_datetime 0.6.11", @@ -4962,7 +5143,7 @@ version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "winnow", @@ -5008,7 +5189,7 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "socket2 0.6.1", + "socket2 0.6.2", "sync_wrapper", "tokio", "tokio-stream", @@ -5118,7 +5299,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "torrust-axum-server", "torrust-rest-tracker-api-client", @@ -5149,7 +5330,7 @@ dependencies = [ "hyper", "hyper-util", "pin-project-lite", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "torrust-server-lib", "torrust-tracker-configuration", @@ -5165,7 +5346,7 @@ dependencies = [ "hyper", "reqwest", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "url", "uuid", ] @@ -5220,7 +5401,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-util", "torrust-axum-health-check-api-server", @@ -5256,7 +5437,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "torrust-tracker-configuration", "tracing", @@ -5284,8 +5465,8 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.17", - "toml 0.9.10+spec-1.1.0", + "thiserror 2.0.18", + "toml 0.9.11+spec-1.1.0", "torrust-tracker-located-error", "tracing", 
"tracing-subscriber", @@ -5298,7 +5479,7 @@ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ "criterion 0.8.1", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -5314,7 +5495,7 @@ dependencies = [ name = "torrust-tracker-located-error" version = "3.0.0-develop" dependencies = [ - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", ] @@ -5330,7 +5511,7 @@ dependencies = [ "rstest 0.25.0", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "torrust-tracker-primitives", "tracing", ] @@ -5347,7 +5528,7 @@ dependencies = [ "serde", "tdyne-peer-id", "tdyne-peer-id-registry", - "thiserror 2.0.17", + "thiserror 2.0.18", "torrust-tracker-configuration", "url", "zerocopy 0.7.35", @@ -5368,7 +5549,7 @@ dependencies = [ "rand 0.9.2", "rstest 0.26.1", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-util", "torrust-tracker-clock", @@ -5427,7 +5608,7 @@ dependencies = [ "rand 0.9.2", "ringbuf", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-util", "torrust-server-lib", @@ -5446,13 +5627,13 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", - "indexmap 2.12.1", + "indexmap 2.13.0", "pin-project-lite", "slab", "sync_wrapper", @@ -5519,7 +5700,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -5674,14 +5855,15 @@ dependencies = [ [[package]] name = "url" -version = "2.5.7" +version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +checksum = 
"ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", "percent-encoding", "serde", + "serde_derive", ] [[package]] @@ -5704,9 +5886,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.19.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" +checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" dependencies = [ "getrandom 0.3.4", "js-sys", @@ -5765,18 +5947,18 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasip2" -version = "1.0.1+wasi-0.2.4" +version = "1.0.2+wasi-0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" dependencies = [ "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ "cfg-if", "once_cell", @@ -5787,11 +5969,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.56" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", + "futures-util", "js-sys", "once_cell", "wasm-bindgen", @@ -5800,9 +5983,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.106" +version = "0.2.108" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5810,31 +5993,31 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" dependencies = [ "unicode-ident", ] [[package]] name = "web-sys" -version = "0.3.83" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" dependencies = [ "js-sys", "wasm-bindgen", @@ -5850,11 +6033,20 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-root-certs" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36a29fc0408b113f68cf32637857ab740edfafdf460c326cd2afaa2d84cc05dc" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "webpki-roots" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" +checksum = 
"12bed680863276c63889429bfd6cab3b99943659923822de1c8a39c49e4d722c" dependencies = [ "rustls-pki-types", ] @@ -5911,7 +6103,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -5922,7 +6114,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -5960,6 +6152,15 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + [[package]] name = "windows-sys" version = "0.52.0" @@ -5987,6 +6188,21 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + [[package]] name = "windows-targets" version = "0.52.6" @@ -6020,6 +6236,12 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" @@ -6032,6 +6254,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" +[[package]] +name = "windows_aarch64_msvc" +version 
= "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" @@ -6044,6 +6272,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + [[package]] name = "windows_i686_gnu" version = "0.52.6" @@ -6068,6 +6302,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + [[package]] name = "windows_i686_msvc" version = "0.52.6" @@ -6080,6 +6320,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" @@ -6092,6 +6338,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + [[package]] name = 
"windows_x86_64_gnullvm" version = "0.52.6" @@ -6104,6 +6356,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" @@ -6127,9 +6385,9 @@ dependencies = [ [[package]] name = "wit-bindgen" -version = "0.46.0" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" [[package]] name = "writeable" @@ -6181,7 +6439,7 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "synstructure", ] @@ -6197,11 +6455,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.31" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" +checksum = "71ddd76bcebeed25db614f82bf31a9f4222d3fbba300e6fb6c00afa26cbd4d9d" dependencies = [ - "zerocopy-derive 0.8.31", + "zerocopy-derive 0.8.34", ] [[package]] @@ -6212,18 +6470,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "zerocopy-derive" -version = "0.8.31" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" +checksum = "d8187381b52e32220d50b255276aa16a084ec0a9017a0ca2152a1f55c539758d" dependencies = [ "proc-macro2", "quote", 
- "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -6243,7 +6501,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "synstructure", ] @@ -6283,9 +6541,15 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] +[[package]] +name = "zmij" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439" + [[package]] name = "zstd" version = "0.13.3" From 457a020c704d3ec084c0df222c15ca00e1c83f82 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 Jan 2026 13:06:09 +0000 Subject: [PATCH 226/247] fix: enable reqwest query feature for API compatibility reqwest 0.13 made the feature optional and disabled by default. This commit adds the feature to the reqwest dependency in the rest-tracker-api-client package to restore query parameter functionality. 
--- Cargo.lock | 1 + packages/rest-tracker-api-client/Cargo.toml | 2 +- .../rest-tracker-api-client/src/v1/client.rs | 20 +++++++++---------- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 146da3a18..8916a6640 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3982,6 +3982,7 @@ dependencies = [ "rustls-platform-verifier", "serde", "serde_json", + "serde_urlencoded", "sync_wrapper", "tokio", "tokio-rustls", diff --git a/packages/rest-tracker-api-client/Cargo.toml b/packages/rest-tracker-api-client/Cargo.toml index cba580e18..c01b9c05a 100644 --- a/packages/rest-tracker-api-client/Cargo.toml +++ b/packages/rest-tracker-api-client/Cargo.toml @@ -16,7 +16,7 @@ version.workspace = true [dependencies] hyper = "1" -reqwest = { version = "0", features = ["json"] } +reqwest = { version = "0", features = ["json", "query"] } serde = { version = "1", features = ["derive"] } thiserror = "2" url = { version = "2", features = ["serde"] } diff --git a/packages/rest-tracker-api-client/src/v1/client.rs b/packages/rest-tracker-api-client/src/v1/client.rs index 3137b8b41..02a5b0d9c 100644 --- a/packages/rest-tracker-api-client/src/v1/client.rs +++ b/packages/rest-tracker-api-client/src/v1/client.rs @@ -204,22 +204,22 @@ impl Client { /// /// Will panic if the request can't be sent pub async fn get(path: Url, query: Option, headers: Option) -> Response { - let builder = reqwest::Client::builder() + let client = reqwest::Client::builder() .timeout(Duration::from_secs(DEFAULT_REQUEST_TIMEOUT_IN_SECS)) .build() .unwrap(); - let builder = match query { - Some(params) => builder.get(path).query(&ReqwestQuery::from(params)), - None => builder.get(path), - }; + let mut request_builder = client.get(path); - let builder = match headers { - Some(headers) => builder.headers(headers), - None => builder, - }; + if let Some(params) = query { + request_builder = request_builder.query(&ReqwestQuery::from(params)); + } + + if let Some(headers) = headers { + 
request_builder = request_builder.headers(headers); + } - builder.send().await.unwrap() + request_builder.send().await.unwrap() } /// Returns a `HeaderMap` with a request id header. From ac47c1b26a068cf371c35fe40660ccfb564f1de2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 Jan 2026 14:08:13 +0000 Subject: [PATCH 227/247] fix: suppress clippy warnings for large error types in config tests --- packages/configuration/src/v2_0_0/mod.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/packages/configuration/src/v2_0_0/mod.rs b/packages/configuration/src/v2_0_0/mod.rs index 8391ba0e1..b3fbc881e 100644 --- a/packages/configuration/src/v2_0_0/mod.rs +++ b/packages/configuration/src/v2_0_0/mod.rs @@ -521,6 +521,7 @@ mod tests { } #[test] + #[allow(clippy::result_large_err)] fn configuration_should_use_the_default_values_when_only_the_mandatory_options_are_provided_by_the_user_via_toml_file() { figment::Jail::expect_with(|jail| { jail.create_file( @@ -552,6 +553,7 @@ mod tests { } #[test] + #[allow(clippy::result_large_err)] fn configuration_should_use_the_default_values_when_only_the_mandatory_options_are_provided_by_the_user_via_toml_content() { figment::Jail::expect_with(|_jail| { let config_toml = r#" @@ -581,6 +583,7 @@ mod tests { } #[test] + #[allow(clippy::result_large_err)] fn default_configuration_could_be_overwritten_from_a_single_env_var_with_toml_contents() { figment::Jail::expect_with(|_jail| { let config_toml = r#" @@ -613,6 +616,7 @@ mod tests { } #[test] + #[allow(clippy::result_large_err)] fn default_configuration_could_be_overwritten_from_a_toml_config_file() { figment::Jail::expect_with(|jail| { jail.create_file( @@ -646,6 +650,7 @@ mod tests { }); } + #[allow(clippy::result_large_err)] #[test] fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin_with_an_env_var() { figment::Jail::expect_with(|jail| { From 046d5c982e34f7ace34e5e5355c8b72f540ad583 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Feb 
2026 11:08:46 +0000 Subject: [PATCH 228/247] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 98 packages to latest compatible versions Updating anyhow v1.0.100 -> v1.0.102 Updating arc-swap v1.8.0 -> v1.8.2 Updating async-compression v0.4.37 -> v0.4.40 Updating async-executor v1.13.3 -> v1.14.0 Updating aws-lc-rs v1.15.4 -> v1.16.0 Updating aws-lc-sys v0.37.0 -> v0.37.1 Updating bitflags v2.10.0 -> v2.11.0 Updating bollard v0.19.4 -> v0.20.1 Updating bollard-stubs v1.49.1-rc.28.4.0 -> v1.52.1-rc.29.1.3 Updating bumpalo v3.19.1 -> v3.20.2 Updating bytemuck v1.24.0 -> v1.25.0 Updating bytes v1.11.0 -> v1.11.1 Updating cc v1.2.54 -> v1.2.56 Adding chacha20 v0.10.0 Adding cipher v0.5.0 Updating clap v4.5.54 -> v4.5.60 Updating clap_builder v4.5.54 -> v4.5.60 Updating clap_derive v4.5.49 -> v4.5.55 Updating clap_lex v0.7.7 -> v1.0.0 Updating compression-codecs v0.4.36 -> v0.4.37 Adding cpufeatures v0.3.0 Updating criterion v0.8.1 -> v0.8.2 Updating criterion-plot v0.8.1 -> v0.8.2 Adding crypto-common v0.2.0 Updating deranged v0.5.5 -> v0.5.6 Adding env_filter v1.0.0 Updating env_logger v0.8.4 -> v0.11.9 Updating find-msvc-tools v0.1.8 -> v0.1.9 Updating flate2 v1.1.8 -> v1.1.9 Updating fs-err v3.2.2 -> v3.3.0 Updating futures v0.3.31 -> v0.3.32 Updating futures-channel v0.3.31 -> v0.3.32 Updating futures-core v0.3.31 -> v0.3.32 Updating futures-executor v0.3.31 -> v0.3.32 Updating futures-io v0.3.31 -> v0.3.32 Updating futures-macro v0.3.31 -> v0.3.32 Updating futures-sink v0.3.31 -> v0.3.32 Updating futures-task v0.3.31 -> v0.3.32 Updating futures-util v0.3.31 -> v0.3.32 Adding getrandom v0.4.1 Adding hybrid-array v0.4.7 Updating hyper-util v0.1.19 -> v0.1.20 Updating iana-time-zone v0.1.64 -> v0.1.65 Adding id-arena v2.3.0 Adding inout v0.2.2 Adding leb128fmt v0.1.0 Updating libc v0.2.180 -> v0.2.182 Updating local-ip-address v0.6.9 -> v0.6.10 Updating memchr v2.7.6 -> v2.8.0 Updating native-tls v0.2.14 -> v0.2.18 Updating neli 
v0.7.3 -> v0.7.4 Removing openssl-probe v0.1.6 Updating portable-atomic v1.13.0 -> v1.13.1 Updating portable-atomic-util v0.2.4 -> v0.2.5 Updating predicates v3.1.3 -> v3.1.4 Updating predicates-core v1.0.9 -> v1.0.10 Updating predicates-tree v1.0.12 -> v1.0.13 Adding prettyplease v0.2.37 Updating quickcheck v1.0.3 -> v1.1.0 Adding rand v0.10.0 Adding rand_core v0.10.0 Updating redox_syscall v0.7.0 -> v0.7.1 Updating regex v1.12.2 -> v1.12.3 Updating regex-automata v0.4.13 -> v0.4.14 Updating regex-syntax v0.8.8 -> v0.8.9 Updating reqwest v0.13.1 -> v0.13.2 Removing rustls-pemfile v2.2.0 Updating ryu v1.0.22 -> v1.0.23 Updating schemars v1.2.0 -> v1.2.1 Removing security-framework v2.11.1 Removing security-framework v3.5.1 Adding security-framework v3.7.0 Updating security-framework-sys v2.15.0 -> v2.17.0 Updating siphasher v1.0.1 -> v1.0.2 Updating slab v0.4.11 -> v0.4.12 Updating subprocess v0.2.13 -> v0.2.15 Updating syn v2.0.114 -> v2.0.117 Updating system-configuration v0.6.1 -> v0.7.0 Updating tempfile v3.24.0 -> v3.25.0 Updating testcontainers v0.26.3 -> v0.27.0 Updating time v0.3.46 -> v0.3.47 Updating time-macros v0.2.26 -> v0.2.27 Updating toml v0.9.11+spec-1.1.0 -> v0.9.12+spec-1.1.0 (available: v1.0.3+spec-1.1.0) Updating toml_parser v1.0.6+spec-1.1.0 -> v1.0.9+spec-1.1.0 Updating tonic v0.14.2 -> v0.14.5 Updating tonic-prost v0.14.2 -> v0.14.5 Updating unicode-ident v1.0.22 -> v1.0.24 Updating ureq v3.1.4 -> v3.2.0 Updating uuid v1.20.0 -> v1.21.0 Adding wasip3 v0.4.0+wasi-0.3.0-rc-2026-01-06 Adding wasm-encoder v0.244.0 Adding wasm-metadata v0.244.0 Adding wasmparser v0.244.0 Updating webpki-root-certs v1.0.5 -> v1.0.6 Removing webpki-roots v1.0.5 Adding wit-bindgen-core v0.51.0 Adding wit-bindgen-rust v0.51.0 Adding wit-bindgen-rust-macro v0.51.0 Adding wit-component v0.244.0 Adding wit-parser v0.244.0 Updating zerocopy v0.8.34 -> v0.8.39 Updating zerocopy-derive v0.8.34 -> v0.8.39 Updating zmij v1.0.17 -> v1.0.21 note: pass `--verbose` to see 7 
unchanged dependencies behind latest ``` --- Cargo.lock | 767 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 485 insertions(+), 282 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8916a6640..e801b94cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -134,9 +134,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.100" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" [[package]] name = "approx" @@ -175,9 +175,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.8.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d03449bb8ca2cc2ef70869af31463d1ae5ccc8fa3e334b307203fbf815207e" +checksum = "f9f3647c145568cec02c42054e07bdf9a5a698e15b466fb2341bfc393cd24aa5" dependencies = [ "rustversion", ] @@ -239,9 +239,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.37" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d10e4f991a553474232bc0a31799f6d24b034a84c0971d80d2e2f78b2e576e40" +checksum = "7d67d43201f4d20c78bcda740c142ca52482d81da80681533d33bf3f0596c8e2" dependencies = [ "compression-codecs", "compression-core", @@ -251,9 +251,9 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.13.3" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497c00e0fd83a72a79a39fcbd8e3e2f055d6f6c7e025f3b3d91f4f8e76527fb8" +checksum = "c96bf972d85afc50bf5ab8fe2d54d1586b4e0b46c97c50a0c9e71e2f7bcd812a" dependencies = [ "async-task", "concurrent-queue", @@ -354,7 +354,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -371,7 +371,7 @@ checksum = 
"9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -397,9 +397,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.15.4" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7b6141e96a8c160799cc2d5adecd5cbbe5054cb8c7c4af53da0f83bb7ad256" +checksum = "d9a7b350e3bb1767102698302bc37256cbd48422809984b98d292c40e2579aa9" dependencies = [ "aws-lc-sys", "zeroize", @@ -407,9 +407,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.37.0" +version = "0.37.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c34dda4df7017c8db52132f0f8a2e0f8161649d15723ed63fc00c82d0f2081a" +checksum = "b092fe214090261288111db7a2b2c2118e5a7f30dc2569f1732c4069a6840549" dependencies = [ "cc", "cmake", @@ -514,7 +514,7 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -609,7 +609,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -620,9 +620,9 @@ checksum = "02b4ff8b16e6076c3e14220b39fbc1fabb6737522281a388998046859400895f" [[package]] name = "bitflags" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" [[package]] name = "bittorrent-http-tracker-core" @@ -721,7 +721,7 @@ dependencies = [ "r2d2", "r2d2_mysql", "r2d2_sqlite", - "rand 0.9.2", + "rand 0.10.0", "serde", "serde_json", "testcontainers", @@ -751,12 +751,12 @@ dependencies = [ "bittorrent-udp-tracker-protocol", "bloom", "blowfish", - "cipher", + "cipher 0.5.0", "criterion 0.5.1", 
"futures", "lazy_static", "mockall", - "rand 0.9.2", + "rand 0.10.0", "serde", "thiserror 2.0.18", "tokio", @@ -831,14 +831,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e412e2cd0f2b2d93e02543ceae7917b3c70331573df19ee046bcbc35e45e87d7" dependencies = [ "byteorder", - "cipher", + "cipher 0.4.4", ] [[package]] name = "bollard" -version = "0.19.4" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87a52479c9237eb04047ddb94788c41ca0d26eaff8b697ecfbb4c32f7fdc3b1b" +checksum = "227aa051deec8d16bd9c34605e7aaf153f240e35483dd42f6f78903847934738" dependencies = [ "async-stream", "base64 0.22.1", @@ -846,7 +846,6 @@ dependencies = [ "bollard-buildkit-proto", "bollard-stubs", "bytes", - "chrono", "futures-core", "futures-util", "hex", @@ -864,14 +863,13 @@ dependencies = [ "rand 0.9.2", "rustls", "rustls-native-certs", - "rustls-pemfile", "rustls-pki-types", "serde", "serde_derive", "serde_json", - "serde_repr", "serde_urlencoded", "thiserror 2.0.18", + "time", "tokio", "tokio-stream", "tokio-util", @@ -896,19 +894,18 @@ dependencies = [ [[package]] name = "bollard-stubs" -version = "1.49.1-rc.28.4.0" +version = "1.52.1-rc.29.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5731fe885755e92beff1950774068e0cae67ea6ec7587381536fca84f1779623" +checksum = "0f0a8ca8799131c1837d1282c3f81f31e76ceb0ce426e04a7fe1ccee3287c066" dependencies = [ "base64 0.22.1", "bollard-buildkit-proto", "bytes", - "chrono", "prost", "serde", "serde_json", "serde_repr", - "serde_with", + "time", ] [[package]] @@ -931,7 +928,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -972,9 +969,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.19.1" +version = "3.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" [[package]] name = "bytecheck" @@ -1000,9 +997,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.24.0" +version = "1.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" +checksum = "c8efb64bd706a16a1bdde310ae86b351e4d21550d98d056f22f8a7f7a2183fec" [[package]] name = "byteorder" @@ -1012,9 +1009,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" [[package]] name = "camino" @@ -1042,9 +1039,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.54" +version = "1.2.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6354c81bbfd62d9cfa9cb3c773c2b7b2a3a482d569de977fd0e961f6e7c00583" +checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" dependencies = [ "find-msvc-tools", "jobserver", @@ -1079,6 +1076,17 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" +[[package]] +name = "chacha20" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f8d983286843e49675a4b7a2d174efe136dc93a18d69130dd18198a6c167601" +dependencies = [ + "cfg-if", + "cpufeatures 0.3.0", + "rand_core 0.10.0", +] + [[package]] name = "chrono" version = "0.4.43" @@ -1124,8 +1132,18 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ - "crypto-common", - "inout", + "crypto-common 0.1.7", + "inout 0.1.4", +] + +[[package]] +name = "cipher" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64727038c8c5e2bb503a15b9f5b9df50a1da9a33e83e1f93067d914f2c6604a5" +dependencies = [ + "crypto-common 0.2.0", + "inout 0.2.2", ] [[package]] @@ -1141,9 +1159,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.54" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" +checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" dependencies = [ "clap_builder", "clap_derive", @@ -1151,9 +1169,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.54" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" +checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" dependencies = [ "anstream", "anstyle", @@ -1163,21 +1181,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.49" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" +checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "clap_lex" -version = "0.7.7" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" +checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" [[package]] name = "cmake" @@ -1219,9 +1237,9 @@ dependencies = [ [[package]] name = "compression-codecs" 
-version = "0.4.36" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00828ba6fd27b45a448e57dbfe84f1029d4c9f26b368157e9a448a5f49a2ec2a" +checksum = "eb7b51a7d9c967fc26773061ba86150f19c50c0d65c887cb1fbe295fd16619b7" dependencies = [ "brotli", "compression-core", @@ -1290,6 +1308,15 @@ dependencies = [ "libc", ] +[[package]] +name = "cpufeatures" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b2a41393f66f16b0823bb79094d54ac5fbd34ab292ddafb9a0456ac9f87d201" +dependencies = [ + "libc", +] + [[package]] name = "crc32fast" version = "1.5.0" @@ -1329,16 +1356,16 @@ dependencies = [ [[package]] name = "criterion" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d883447757bb0ee46f233e9dc22eb84d93a9508c9b868687b274fc431d886bf" +checksum = "950046b2aa2492f9a536f5f4f9a3de7b9e2476e575e05bd6c333371add4d98f3" dependencies = [ "alloca", "anes", "cast", "ciborium", "clap", - "criterion-plot 0.8.1", + "criterion-plot 0.8.2", "itertools 0.13.0", "num-traits", "oorandom", @@ -1365,9 +1392,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed943f81ea2faa8dcecbbfa50164acf95d555afec96a27871663b300e387b2e4" +checksum = "d8d80a2f4f5b554395e47b5d8305bc3d27813bacb73493eb1001e8f76dae29ea" dependencies = [ "cast", "itertools 0.13.0", @@ -1455,6 +1482,15 @@ dependencies = [ "typenum", ] +[[package]] +name = "crypto-common" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "211f05e03c7d03754740fd9e585de910a095d6b99f8bcfffdef8319fa02a8331" +dependencies = [ + "hybrid-array", +] + [[package]] name = "darling" version = "0.20.11" @@ -1486,7 +1522,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1500,7 +1536,7 
@@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1511,7 +1547,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1522,7 +1558,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1541,9 +1577,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" +checksum = "cc3dc5ad92c2e2d1c193bbbbdf2ea477cb81331de4f3103f267ca18368b988c4" dependencies = [ "powerfmt", "serde_core", @@ -1567,7 +1603,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1577,7 +1613,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1599,7 +1635,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.114", + "syn 2.0.117", "unicode-xid", ] @@ -1611,7 +1647,7 @@ checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1627,7 +1663,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", - "crypto-common", + "crypto-common 0.1.7", ] [[package]] @@ -1638,7 +1674,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 
2.0.117", ] [[package]] @@ -1686,15 +1722,25 @@ dependencies = [ ] [[package]] -name = "env_logger" -version = "0.8.4" +name = "env_filter" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +checksum = "7a1c3cc8e57274ec99de65301228b537f1e4eedc1b8e0f9411c6caac8ae7308f" dependencies = [ "log", "regex", ] +[[package]] +name = "env_logger" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2daee4ea451f429a58296525ddf28b45a3b64f1acf6587e2067437bb11e218d" +dependencies = [ + "env_filter", + "log", +] + [[package]] name = "equivalent" version = "1.0.2" @@ -1806,15 +1852,15 @@ dependencies = [ [[package]] name = "find-msvc-tools" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" [[package]] name = "flate2" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" +checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" dependencies = [ "crc32fast", "libz-sys", @@ -1918,7 +1964,7 @@ checksum = "a0b4095fc99e1d858e5b8c7125d2638372ec85aa0fe6c807105cf10b0265ca6c" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1930,7 +1976,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1942,14 +1988,14 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "fs-err" -version = "3.2.2" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"baf68cef89750956493a66a10f512b9e58d9db21f2a573c079c0bdf1207a54a7" +checksum = "73fde052dbfc920003cfd2c8e2c6e6d4cc7c1091538c3a24226cec0665ab08c0" dependencies = [ "autocfg", "tokio", @@ -1969,9 +2015,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" dependencies = [ "futures-channel", "futures-core", @@ -1984,9 +2030,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" dependencies = [ "futures-core", "futures-sink", @@ -1994,15 +2040,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" dependencies = [ "futures-core", "futures-task", @@ -2011,9 +2057,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = 
"cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" [[package]] name = "futures-lite" @@ -2030,26 +2076,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "futures-sink" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" [[package]] name = "futures-timer" @@ -2059,9 +2105,9 @@ checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" dependencies = [ "futures-channel", "futures-core", @@ -2071,7 +2117,6 @@ dependencies = [ "futures-task", "memchr", "pin-project-lite", - "pin-utils", "slab", ] @@ -2112,6 +2157,20 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" +dependencies = [ + "cfg-if", + "libc", 
+ "r-efi", + "rand_core 0.10.0", + "wasip2", + "wasip3", +] + [[package]] name = "getset" version = "0.1.6" @@ -2121,7 +2180,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2175,7 +2234,7 @@ checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", - "zerocopy 0.8.34", + "zerocopy 0.8.39", ] [[package]] @@ -2300,6 +2359,15 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +[[package]] +name = "hybrid-array" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1b229d73f5803b562cc26e4da0396c8610a4ee209f4fac8fa4f8d709166dc45" +dependencies = [ + "typenum", +] + [[package]] name = "hyper" version = "1.8.1" @@ -2369,14 +2437,13 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" dependencies = [ "base64 0.22.1", "bytes", "futures-channel", - "futures-core", "futures-util", "http", "http-body", @@ -2410,9 +2477,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.64" +version = "0.1.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2513,6 +2580,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + [[package]] name = "ident_case" version = "1.0.1" @@ -2578,6 +2651,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "inout" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4250ce6452e92010fdf7268ccc5d14faa80bb12fc741938534c58f16804e03c7" +dependencies = [ + "hybrid-array", +] + [[package]] name = "io-enum" version = "1.2.0" @@ -2716,11 +2798,17 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + [[package]] name = "libc" -version = "0.2.180" +version = "0.2.182" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" +checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" [[package]] name = "libloading" @@ -2746,7 +2834,7 @@ checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" dependencies = [ "bitflags", "libc", - "redox_syscall 0.7.0", + "redox_syscall 0.7.1", ] [[package]] @@ -2785,9 +2873,9 @@ checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "local-ip-address" -version = "0.6.9" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92488bc8a0f99ee9f23577bdd06526d49657df8bd70504c61f812337cdad01ab" +checksum = "79ef8c257c92ade496781a32a581d43e3d512cf8ce714ecf04ea80f93ed0ff4a" dependencies = [ "libc", "neli", @@ -2835,9 +2923,9 @@ checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] name = "memchr" -version = "2.7.6" +version = "2.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "miette" @@ -2866,7 +2954,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2925,7 +3013,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2975,7 +3063,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "termcolor", "thiserror 1.0.69", ] @@ -3030,26 +3118,26 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.14" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +checksum = "465500e14ea162429d264d44189adc38b199b62b1c21eea9f69e4b73cb03bbf2" dependencies = [ "libc", "log", "openssl", - "openssl-probe 0.1.6", + "openssl-probe", "openssl-sys", "schannel", - "security-framework 2.11.1", + "security-framework", "security-framework-sys", "tempfile", ] [[package]] name = "neli" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e23bebbf3e157c402c4d5ee113233e5e0610cc27453b2f07eefce649c7365dcc" +checksum = "22f9786d56d972959e1408b6a93be6af13b9c1392036c5c1fafa08a1b0c6ee87" dependencies = [ "bitflags", "byteorder", @@ -3071,7 +3159,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3228,15 +3316,9 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] -[[package]] -name = "openssl-probe" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" - [[package]] name = "openssl-probe" version = "0.2.1" @@ -3322,7 +3404,7 @@ dependencies = [ "regex", "regex-syntax", "structmeta", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3345,7 +3427,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3419,7 +3501,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3495,15 +3577,15 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" [[package]] name = "portable-atomic-util" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +checksum = "7a9db96d7fa8782dd8c15ce32ffe8680bbd1e978a43bf51a34d39483540495f5" dependencies = [ "portable-atomic", ] @@ -3529,14 +3611,14 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.34", + "zerocopy 0.8.39", ] [[package]] name = "predicates" -version = "3.1.3" +version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" +checksum = "ada8f2932f28a27ee7b70dd6c1c39ea0675c55a36879ab92f3a715eaa1e63cfe" dependencies = [ "anstyle", "predicates-core", @@ -3544,15 +3626,15 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.9" +version = "1.0.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" +checksum = "cad38746f3166b4031b1a0d39ad9f954dd291e7854fcc0eed52ee41a0b50d144" [[package]] name = "predicates-tree" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" +checksum = "d0de1b847b39c8131db0467e9df1ff60e6d0562ab8e9a16e568ad0fdb372e2f2" dependencies = [ "predicates-core", "termtree", @@ -3568,6 +3650,16 @@ dependencies = [ "yansi", ] +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn 2.0.117", +] + [[package]] name = "proc-macro-crate" version = "3.4.0" @@ -3596,7 +3688,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3616,7 +3708,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "version_check", "yansi", ] @@ -3641,7 +3733,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3675,13 +3767,13 @@ dependencies = [ [[package]] name = "quickcheck" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" +checksum = "95c589f335db0f6aaa168a7cd27b1fc6920f5e1470c804f814d9cd6e62a0f70b" dependencies = [ "env_logger", "log", - "rand 0.8.5", + "rand 0.10.0", ] [[package]] @@ -3814,6 +3906,17 @@ dependencies = [ "rand_core 0.9.5", ] +[[package]] +name = "rand" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "bc266eb313df6c5c09c1c7b1fbe2510961e5bcd3add930c1e31f7ed9da0feff8" +dependencies = [ + "chacha20", + "getrandom 0.4.1", + "rand_core 0.10.0", +] + [[package]] name = "rand_chacha" version = "0.3.1" @@ -3852,6 +3955,12 @@ dependencies = [ "getrandom 0.3.4", ] +[[package]] +name = "rand_core" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c8d0fd677905edcbeedbf2edb6494d676f0e98d54d5cf9bda0b061cb8fb8aba" + [[package]] name = "rayon" version = "1.11.0" @@ -3883,9 +3992,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f3fe0889e69e2ae9e41f4d6c4c0181701d00e4697b356fb1f74173a5e0ee27" +checksum = "35985aa610addc02e24fc232012c86fd11f14111180f902b67e2d5331f8ebf2b" dependencies = [ "bitflags", ] @@ -3907,14 +4016,14 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "regex" -version = "1.12.2" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -3924,9 +4033,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" dependencies = [ "aho-corasick", "memchr", @@ -3935,9 +4044,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" +checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" [[package]] name = "relative-path" @@ -3956,9 +4065,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04e9018c9d814e5f30cc16a0f03271aeab3571e609612d9fe78c1aa8d11c2f62" +checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801" dependencies = [ "base64 0.22.1", "bytes", @@ -4096,7 +4205,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.114", + "syn 2.0.117", "unicode-ident", ] @@ -4114,7 +4223,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.114", + "syn 2.0.117", "unicode-ident", ] @@ -4205,19 +4314,10 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe 0.2.1", + "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.5.1", -] - -[[package]] -name = "rustls-pemfile" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" -dependencies = [ - "rustls-pki-types", + "security-framework", ] [[package]] @@ -4245,7 +4345,7 @@ dependencies = [ "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki", - "security-framework 3.5.1", + "security-framework", "security-framework-sys", "webpki-root-certs", "windows-sys 0.61.2", @@ -4277,9 +4377,9 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" +checksum = 
"9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" [[package]] name = "same-file" @@ -4328,9 +4428,9 @@ dependencies = [ [[package]] name = "schemars" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54e910108742c57a770f492731f99be216a52fadd361b06c8fb59d74ccc267d2" +checksum = "a2b42f36aa1cd011945615b92222f6bf73c599a102a300334cd7f8dbeec726cc" dependencies = [ "dyn-clone", "ref-cast", @@ -4352,22 +4452,9 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "security-framework" -version = "2.11.1" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" -dependencies = [ - "bitflags", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework" -version = "3.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" +checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" dependencies = [ "bitflags", "core-foundation 0.10.1", @@ -4378,9 +4465,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.15.0" +version = "2.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" dependencies = [ "core-foundation-sys", "libc", @@ -4439,7 +4526,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4488,7 +4575,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.114", + "syn 2.0.117", ] [[package]] @@ -4533,7 +4620,7 @@ dependencies = [ "indexmap 1.9.3", "indexmap 2.13.0", "schemars 0.9.0", - "schemars 1.2.0", + "schemars 1.2.1", "serde_core", "serde_json", "serde_with_macros", @@ -4549,7 +4636,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4559,7 +4646,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", - "cpufeatures", + "cpufeatures 0.2.17", "digest", ] @@ -4570,7 +4657,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", - "cpufeatures", + "cpufeatures 0.2.17", "digest", ] @@ -4613,15 +4700,15 @@ checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] name = "siphasher" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] name = "slab" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" [[package]] name = "smallvec" @@ -4688,7 +4775,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4699,14 +4786,14 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "subprocess" -version = "0.2.13" +version = "0.2.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f75238edb5be30a9ea3035b945eb9c319dde80e879411cdc9a8978e1ac822960" +checksum = "2c56e8662b206b9892d7a5a3f2ecdbcb455d3d6b259111373b7e08b8055158a8" dependencies = [ "libc", "winapi", @@ -4752,9 +4839,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.114" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -4778,14 +4865,14 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "system-configuration" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" dependencies = [ "bitflags", "core-foundation 0.9.4", @@ -4827,12 +4914,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.24.0" +version = "3.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" +checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" dependencies = [ "fastrand", - "getrandom 0.3.4", + "getrandom 0.4.1", "once_cell", "rustix", "windows-sys 0.61.2", @@ -4865,9 +4952,9 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "testcontainers" -version = "0.26.3" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a81ec0158db5fbb9831e09d1813fe5ea9023a2b5e6e8e0a5fe67e2a820733629" +checksum = 
"c3fdcea723c64cc08dbc533b3761e345a15bf1222cbe6cb611de09b43f17a168" dependencies = [ "astral-tokio-tar", "async-trait", @@ -4878,6 +4965,7 @@ dependencies = [ "etcetera", "ferroid", "futures", + "http", "itertools 0.14.0", "log", "memchr", @@ -4929,7 +5017,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4940,7 +5028,7 @@ checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4954,9 +5042,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9da98b7d9b7dad93488a84b8248efc35352b0b2657397d4167e7ad67e5d535e5" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", "itoa", @@ -4975,9 +5063,9 @@ checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.26" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc610bac2dcee56805c99642447d4c5dbde4d01f752ffea0199aee1f601dc4" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" dependencies = [ "num-conv", "time-core", @@ -5042,7 +5130,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5093,9 +5181,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.11+spec-1.1.0" +version = "0.9.12+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" +checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" dependencies = [ "indexmap 2.13.0", 
"serde_core", @@ -5152,9 +5240,9 @@ dependencies = [ [[package]] name = "toml_parser" -version = "1.0.6+spec-1.1.0" +version = "1.0.9+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" +checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" dependencies = [ "winnow", ] @@ -5173,9 +5261,9 @@ checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" [[package]] name = "tonic" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" +checksum = "fec7c61a0695dc1887c1b53952990f3ad2e3a31453e1f49f10e75424943a93ec" dependencies = [ "async-trait", "axum", @@ -5202,9 +5290,9 @@ dependencies = [ [[package]] name = "tonic-prost" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +checksum = "a55376a0bbaa4975a3f10d009ad763d8f4108f067c7c2e74f3001fb49778d309" dependencies = [ "bytes", "prost", @@ -5256,7 +5344,7 @@ dependencies = [ "hyper", "local-ip-address", "percent-encoding", - "rand 0.9.2", + "rand 0.10.0", "reqwest", "serde", "serde_bencode", @@ -5397,7 +5485,7 @@ dependencies = [ "clap", "local-ip-address", "mockall", - "rand 0.9.2", + "rand 0.10.0", "regex", "reqwest", "serde", @@ -5467,7 +5555,7 @@ dependencies = [ "serde_json", "serde_with", "thiserror 2.0.18", - "toml 0.9.11+spec-1.1.0", + "toml 0.9.12+spec-1.1.0", "torrust-tracker-located-error", "tracing", "tracing-subscriber", @@ -5479,7 +5567,7 @@ dependencies = [ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ - "criterion 0.8.1", + "criterion 0.8.2", "thiserror 2.0.18", ] @@ -5543,11 +5631,11 @@ dependencies = [ "async-std", "bittorrent-primitives", "chrono", - "criterion 
0.8.1", + "criterion 0.8.2", "crossbeam-skiplist", "futures", "mockall", - "rand 0.9.2", + "rand 0.10.0", "rstest 0.26.1", "serde", "thiserror 2.0.18", @@ -5566,7 +5654,7 @@ dependencies = [ name = "torrust-tracker-test-helpers" version = "3.0.0-develop" dependencies = [ - "rand 0.9.2", + "rand 0.10.0", "torrust-tracker-configuration", "tracing", "tracing-subscriber", @@ -5579,7 +5667,7 @@ dependencies = [ "aquatic_udp_protocol", "async-std", "bittorrent-primitives", - "criterion 0.8.1", + "criterion 0.8.2", "crossbeam-skiplist", "dashmap", "futures", @@ -5606,7 +5694,7 @@ dependencies = [ "futures-util", "local-ip-address", "mockall", - "rand 0.9.2", + "rand 0.10.0", "ringbuf", "serde", "thiserror 2.0.18", @@ -5701,7 +5789,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5786,9 +5874,9 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" [[package]] name = "unicode-linebreak" @@ -5828,9 +5916,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "3.1.4" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d39cb1dbab692d82a977c0392ffac19e188bd9186a9f32806f0aaa859d75585a" +checksum = "fdc97a28575b85cfedf2a7e7d3cc64b3e11bd8ac766666318003abbacc7a21fc" dependencies = [ "base64 0.22.1", "log", @@ -5839,7 +5927,6 @@ dependencies = [ "rustls-pki-types", "ureq-proto", "utf-8", - "webpki-roots", ] [[package]] @@ -5887,11 +5974,11 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.20.0" +version = "1.21.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" +checksum = "b672338555252d43fd2240c714dc444b8c6fb0a5c5335e65a07bba7742735ddb" dependencies = [ - "getrandom 0.3.4", + "getrandom 0.4.1", "js-sys", "rand 0.9.2", "wasm-bindgen", @@ -5955,6 +6042,15 @@ dependencies = [ "wit-bindgen", ] +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen", +] + [[package]] name = "wasm-bindgen" version = "0.2.108" @@ -6001,7 +6097,7 @@ dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "wasm-bindgen-shared", ] @@ -6014,6 +6110,40 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap 2.13.0", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap 2.13.0", + "semver", +] + [[package]] name = "web-sys" version = "0.3.85" @@ -6036,18 +6166,9 @@ dependencies = [ [[package]] name = "webpki-root-certs" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"36a29fc0408b113f68cf32637857ab740edfafdf460c326cd2afaa2d84cc05dc" -dependencies = [ - "rustls-pki-types", -] - -[[package]] -name = "webpki-roots" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12bed680863276c63889429bfd6cab3b99943659923822de1c8a39c49e4d722c" +checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca" dependencies = [ "rustls-pki-types", ] @@ -6104,7 +6225,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6115,7 +6236,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6389,6 +6510,88 @@ name = "wit-bindgen" version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap 2.13.0", + "prettyplease", + "syn 2.0.117", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + 
"quote", + "syn 2.0.117", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap 2.13.0", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap 2.13.0", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] [[package]] name = "writeable" @@ -6440,7 +6643,7 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "synstructure", ] @@ -6456,11 +6659,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.34" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71ddd76bcebeed25db614f82bf31a9f4222d3fbba300e6fb6c00afa26cbd4d9d" +checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" dependencies = [ - "zerocopy-derive 0.8.34", + "zerocopy-derive 0.8.39", ] [[package]] @@ -6471,18 +6674,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "zerocopy-derive" -version = "0.8.34" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8187381b52e32220d50b255276aa16a084ec0a9017a0ca2152a1f55c539758d" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6502,7 +6705,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "synstructure", ] @@ -6542,14 +6745,14 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "zmij" -version = "1.0.17" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" [[package]] name = "zstd" From f737ace07747c99ef6d393d7cacdff4d7083532b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Feb 2026 11:58:56 +0000 Subject: [PATCH 229/247] fix: resolve compilation errors after dependency updates BREAKING CHANGE: cipher crate pinned to v0.4 for compatibility with blowfish - Replace Rng import with RngExt for sample_iter method in rand 0.10 - Pin cipher crate to v0.4 to match blowfish dependency constraints - Add explicit generic-array dependency to udp-tracker-core - Import GenericArray directly from generic_array crate - Update Keeper trait in crypto/keys.rs to use BlockEncrypt + BlockDecrypt bounds - Add BlockEncrypt and BlockDecrypt trait imports to connection_cookie.rs - Fix imports in: - packages/tracker-core/src/authentication/key/peer_key.rs - packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs - packages/udp-tracker-core/src/crypto/keys.rs - packages/test-helpers/src/random.rs - src/console/ci/e2e/tracker_container.rs --- Cargo.lock | 48 +++---------------- packages/test-helpers/src/random.rs | 2 +- .../src/authentication/key/peer_key.rs | 2 +- packages/tracker-core/src/test_helpers.rs | 2 +- packages/udp-tracker-core/Cargo.toml | 3 +- .../src/crypto/ephemeral_instance_keys.rs | 4 +- 
packages/udp-tracker-core/src/crypto/keys.rs | 4 +- src/console/ci/e2e/tracker_container.rs | 2 +- 8 files changed, 17 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e801b94cb..c6b151951 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -751,9 +751,10 @@ dependencies = [ "bittorrent-udp-tracker-protocol", "bloom", "blowfish", - "cipher 0.5.0", + "cipher", "criterion 0.5.1", "futures", + "generic-array", "lazy_static", "mockall", "rand 0.10.0", @@ -831,7 +832,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e412e2cd0f2b2d93e02543ceae7917b3c70331573df19ee046bcbc35e45e87d7" dependencies = [ "byteorder", - "cipher 0.4.4", + "cipher", ] [[package]] @@ -1132,18 +1133,8 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ - "crypto-common 0.1.7", - "inout 0.1.4", -] - -[[package]] -name = "cipher" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64727038c8c5e2bb503a15b9f5b9df50a1da9a33e83e1f93067d914f2c6604a5" -dependencies = [ - "crypto-common 0.2.0", - "inout 0.2.2", + "crypto-common", + "inout", ] [[package]] @@ -1482,15 +1473,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "crypto-common" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "211f05e03c7d03754740fd9e585de910a095d6b99f8bcfffdef8319fa02a8331" -dependencies = [ - "hybrid-array", -] - [[package]] name = "darling" version = "0.20.11" @@ -1663,7 +1645,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", - "crypto-common 0.1.7", + "crypto-common", ] [[package]] @@ -2359,15 +2341,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" -[[package]] -name = "hybrid-array" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b229d73f5803b562cc26e4da0396c8610a4ee209f4fac8fa4f8d709166dc45" -dependencies = [ - "typenum", -] - [[package]] name = "hyper" version = "1.8.1" @@ -2651,15 +2624,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "inout" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4250ce6452e92010fdf7268ccc5d14faa80bb12fc741938534c58f16804e03c7" -dependencies = [ - "hybrid-array", -] - [[package]] name = "io-enum" version = "1.2.0" diff --git a/packages/test-helpers/src/random.rs b/packages/test-helpers/src/random.rs index f096d695c..62265dbd7 100644 --- a/packages/test-helpers/src/random.rs +++ b/packages/test-helpers/src/random.rs @@ -1,6 +1,6 @@ //! Random data generators for testing. use rand::distr::Alphanumeric; -use rand::{rng, Rng}; +use rand::{rng, RngExt}; /// Returns a random alphanumeric string of a certain size. 
/// diff --git a/packages/tracker-core/src/authentication/key/peer_key.rs b/packages/tracker-core/src/authentication/key/peer_key.rs index 41aba950b..ba648ad2f 100644 --- a/packages/tracker-core/src/authentication/key/peer_key.rs +++ b/packages/tracker-core/src/authentication/key/peer_key.rs @@ -13,7 +13,7 @@ use std::time::Duration; use derive_more::Display; use rand::distr::Alphanumeric; -use rand::{rng, Rng}; +use rand::{rng, RngExt}; use serde::{Deserialize, Serialize}; use thiserror::Error; use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index 62649cd22..bf21e6f94 100644 --- a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -7,7 +7,7 @@ pub(crate) mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; - use rand::Rng; + use rand::RngExt; use torrust_tracker_configuration::Configuration; #[cfg(test)] use torrust_tracker_configuration::Core; diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index b3007eb80..aa12f898f 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -20,9 +20,10 @@ bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" bittorrent-udp-tracker-protocol = { version = "3.0.0-develop", path = "../udp-protocol" } bloom = "0.3.2" blowfish = "0" -cipher = "0" +cipher = "0.4" criterion = { version = "0.5.1", features = ["async_tokio"] } futures = "0" +generic-array = "0" lazy_static = "1" rand = "0" serde = "1.0.219" diff --git a/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs b/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs index 58ba70562..de40e4b1d 100644 --- a/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs +++ 
b/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs @@ -4,10 +4,10 @@ //! application starts and are not persisted anywhere. use blowfish::BlowfishLE; -use cipher::generic_array::GenericArray; use cipher::{BlockSizeUser, KeyInit}; +use generic_array::GenericArray; use rand::rngs::ThreadRng; -use rand::Rng; +use rand::RngExt; pub type Seed = [u8; 32]; pub type CipherBlowfish = BlowfishLE; diff --git a/packages/udp-tracker-core/src/crypto/keys.rs b/packages/udp-tracker-core/src/crypto/keys.rs index f9a3e361d..bb813b9dc 100644 --- a/packages/udp-tracker-core/src/crypto/keys.rs +++ b/packages/udp-tracker-core/src/crypto/keys.rs @@ -5,6 +5,8 @@ //! //! It also provides the logic for the cipher for encryption and decryption. +use cipher::{BlockDecrypt, BlockEncrypt}; + use self::detail_cipher::CURRENT_CIPHER; use self::detail_seed::CURRENT_SEED; pub use crate::crypto::ephemeral_instance_keys::CipherArrayBlowfish; @@ -13,7 +15,7 @@ use crate::crypto::ephemeral_instance_keys::{CipherBlowfish, Seed, RANDOM_CIPHER /// This trait is for structures that can keep and provide a seed. pub trait Keeper { type Seed: Sized + Default + AsMut<[u8]>; - type Cipher: cipher::BlockCipher; + type Cipher: BlockEncrypt + BlockDecrypt; /// It returns a reference to the seed that is keeping. 
fn get_seed() -> &'static Self::Seed; diff --git a/src/console/ci/e2e/tracker_container.rs b/src/console/ci/e2e/tracker_container.rs index a3845c103..1a7717a41 100644 --- a/src/console/ci/e2e/tracker_container.rs +++ b/src/console/ci/e2e/tracker_container.rs @@ -1,7 +1,7 @@ use std::time::Duration; use rand::distr::Alphanumeric; -use rand::Rng; +use rand::RngExt; use super::docker::{RunOptions, RunningContainer}; use super::logs_parser::RunningServices; From ed0937bfe450a306de84f90660683c7a7fb16f56 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Mar 2026 07:55:53 +0000 Subject: [PATCH 230/247] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 35 packages to latest compatible versions Updating async-compression v0.4.40 -> v0.4.41 Updating aws-lc-rs v1.16.0 -> v1.16.1 Updating aws-lc-sys v0.37.1 -> v0.38.0 Updating chrono v0.4.43 -> v0.4.44 Updating deranged v0.5.6 -> v0.5.8 Updating derive_utils v0.15.0 -> v0.15.1 Updating io-enum v1.2.0 -> v1.2.1 Updating ipnet v2.11.0 -> v2.12.0 Updating js-sys v0.3.85 -> v0.3.91 Updating libredox v0.1.12 -> v0.1.14 Updating libz-sys v1.1.23 -> v1.1.24 Updating linux-raw-sys v0.11.0 -> v0.12.1 Updating owo-colors v4.2.3 -> v4.3.0 Updating pin-project v1.1.10 -> v1.1.11 Updating pin-project-internal v1.1.10 -> v1.1.11 Updating pin-project-lite v0.2.16 -> v0.2.17 Updating piper v0.2.4 -> v0.2.5 Adding plain v0.2.3 Updating redox_syscall v0.7.1 -> v0.7.3 Updating regex-syntax v0.8.9 -> v0.8.10 Updating rustix v1.1.3 -> v1.1.4 Updating rustls v0.23.36 -> v0.23.37 Updating serde_with v3.16.1 -> v3.17.0 Updating serde_with_macros v3.16.1 -> v3.17.0 Updating tempfile v3.25.0 -> v3.26.0 Updating testcontainers v0.27.0 -> v0.27.1 Updating tokio-macros v2.6.0 -> v2.6.1 Updating wasm-bindgen v0.2.108 -> v0.2.114 Updating wasm-bindgen-futures v0.4.58 -> v0.4.64 Updating wasm-bindgen-macro v0.2.108 -> v0.2.114 Updating wasm-bindgen-macro-support v0.2.108 -> v0.2.114 Updating wasm-bindgen-shared v0.2.108 
-> v0.2.114 Updating web-sys v0.3.85 -> v0.3.91 Updating zerocopy v0.8.39 -> v0.8.40 Updating zerocopy-derive v0.8.39 -> v0.8.40 note: pass `--verbose` to see 9 unchanged dependencies behind latest ``` --- Cargo.lock | 151 ++++++++++++++++++++++++++++------------------------- 1 file changed, 79 insertions(+), 72 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c6b151951..6894e2bcd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -239,9 +239,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.40" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d67d43201f4d20c78bcda740c142ca52482d81da80681533d33bf3f0596c8e2" +checksum = "d0f9ee0f6e02ffd7ad5816e9464499fba7b3effd01123b515c41d1697c43dad1" dependencies = [ "compression-codecs", "compression-core", @@ -397,9 +397,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.16.0" +version = "1.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a7b350e3bb1767102698302bc37256cbd48422809984b98d292c40e2579aa9" +checksum = "94bffc006df10ac2a68c83692d734a465f8ee6c5b384d8545a636f81d858f4bf" dependencies = [ "aws-lc-sys", "zeroize", @@ -407,9 +407,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.37.1" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b092fe214090261288111db7a2b2c2118e5a7f30dc2569f1732c4069a6840549" +checksum = "4321e568ed89bb5a7d291a7f37997c2c0df89809d7b6d12062c81ddb54aa782e" dependencies = [ "cc", "cmake", @@ -1090,9 +1090,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.43" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" +checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" dependencies = [ "iana-time-zone", 
"num-traits", @@ -1559,9 +1559,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc3dc5ad92c2e2d1c193bbbbdf2ea477cb81331de4f3103f267ca18368b988c4" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" dependencies = [ "powerfmt", "serde_core", @@ -1623,9 +1623,9 @@ dependencies = [ [[package]] name = "derive_utils" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" +checksum = "362f47930db19fe7735f527e6595e4900316b893ebf6d48ad3d31be928d57dd6" dependencies = [ "proc-macro2", "quote", @@ -2216,7 +2216,7 @@ checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", - "zerocopy 0.8.39", + "zerocopy 0.8.40", ] [[package]] @@ -2626,18 +2626,18 @@ dependencies = [ [[package]] name = "io-enum" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d197db2f7ebf90507296df3aebaf65d69f5dce8559d8dbd82776a6cadab61bbf" +checksum = "7de9008599afe8527a8c9d70423437363b321649161e98473f433de802d76107" dependencies = [ "derive_utils", ] [[package]] name = "ipnet" -version = "2.11.0" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" [[package]] name = "iri-string" @@ -2739,9 +2739,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.85" +version = "0.3.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" +checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c" dependencies = [ 
"once_cell", "wasm-bindgen", @@ -2792,13 +2792,14 @@ checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "libredox" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" +checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a" dependencies = [ "bitflags", "libc", - "redox_syscall 0.7.1", + "plain", + "redox_syscall 0.7.3", ] [[package]] @@ -2814,9 +2815,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.23" +version = "1.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +checksum = "4735e9cbde5aac84a5ce588f6b23a90b9b0b528f6c5a8db8a4aff300463a0839" dependencies = [ "cc", "pkg-config", @@ -2825,9 +2826,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" +checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" [[package]] name = "litemap" @@ -3303,9 +3304,9 @@ dependencies = [ [[package]] name = "owo-colors" -version = "4.2.3" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52" +checksum = "d211803b9b6b570f68772237e415a029d5a50c65d382910b879fb19d3271f94d" [[package]] name = "page_size" @@ -3450,18 +3451,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.10" +version = "1.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +checksum = "f1749c7ed4bcaf4c3d0a3efc28538844fb29bcdd7d2b67b2be7e20ba861ff517" dependencies = [ 
"pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.10" +version = "1.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +checksum = "d9b20ed30f105399776b9c883e68e536ef602a16ae6f596d2c473591d6ad64c6" dependencies = [ "proc-macro2", "quote", @@ -3470,9 +3471,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" +checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" [[package]] name = "pin-utils" @@ -3482,9 +3483,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +checksum = "c835479a4443ded371d6c535cbfd8d31ad92c5d23ae9770a61bc155e4992a3c1" dependencies = [ "atomic-waker", "fastrand", @@ -3497,6 +3498,12 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + [[package]] name = "plotters" version = "0.3.7" @@ -3575,7 +3582,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.39", + "zerocopy 0.8.40", ] [[package]] @@ -3956,9 +3963,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.7.1" +version = "0.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "35985aa610addc02e24fc232012c86fd11f14111180f902b67e2d5331f8ebf2b" +checksum = "6ce70a74e890531977d37e532c34d45e9055d2409ed08ddba14529471ed0be16" dependencies = [ "bitflags", ] @@ -4008,9 +4015,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" [[package]] name = "relative-path" @@ -4245,9 +4252,9 @@ dependencies = [ [[package]] name = "rustix" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" +checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" dependencies = [ "bitflags", "errno", @@ -4258,9 +4265,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.36" +version = "0.23.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" +checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4" dependencies = [ "aws-lc-rs", "log", @@ -4574,9 +4581,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.16.1" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" +checksum = "381b283ce7bc6b476d903296fb59d0d36633652b633b27f64db4fb46dcbfc3b9" dependencies = [ "base64 0.22.1", "chrono", @@ -4593,9 +4600,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.16.1" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" +checksum 
= "a6d4e30573c8cb306ed6ab1dca8423eec9a463ea0e155f45399455e0368b27e0" dependencies = [ "darling 0.21.3", "proc-macro2", @@ -4878,9 +4885,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.25.0" +version = "3.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" +checksum = "82a72c767771b47409d2345987fda8628641887d5466101319899796367354a0" dependencies = [ "fastrand", "getrandom 0.4.1", @@ -4916,9 +4923,9 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "testcontainers" -version = "0.27.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fdcea723c64cc08dbc533b3761e345a15bf1222cbe6cb611de09b43f17a168" +checksum = "c1c0624faaa317c56d6d19136580be889677259caf5c897941c6f446b4655068" dependencies = [ "astral-tokio-tar", "async-trait", @@ -5088,9 +5095,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" +checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c" dependencies = [ "proc-macro2", "quote", @@ -6017,9 +6024,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.108" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" +checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e" dependencies = [ "cfg-if", "once_cell", @@ -6030,9 +6037,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.58" +version = "0.4.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" +checksum = 
"e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8" dependencies = [ "cfg-if", "futures-util", @@ -6044,9 +6051,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.108" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" +checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6054,9 +6061,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.108" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" +checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3" dependencies = [ "bumpalo", "proc-macro2", @@ -6067,9 +6074,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.108" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" +checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16" dependencies = [ "unicode-ident", ] @@ -6110,9 +6117,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.85" +version = "0.3.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" +checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9" dependencies = [ "js-sys", "wasm-bindgen", @@ -6623,11 +6630,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.39" +version = "0.8.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" +checksum = "a789c6e490b576db9f7e6b6d661bcc9799f7c0ac8352f56ea20193b2681532e5" 
dependencies = [ - "zerocopy-derive 0.8.39", + "zerocopy-derive 0.8.40", ] [[package]] @@ -6643,9 +6650,9 @@ dependencies = [ [[package]] name = "zerocopy-derive" -version = "0.8.39" +version = "0.8.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" +checksum = "f65c489a7071a749c849713807783f70672b28094011623e200cb86dcb835953" dependencies = [ "proc-macro2", "quote", From 29edbb6d154f2230d7695d2bd47ca5084fbc53a4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Mar 2026 08:04:34 +0000 Subject: [PATCH 231/247] fix: collapse nested if into match arm guard in pagination test Resolves clippy::collapsible_match lint by moving the inner if condition into the match arm guard for the Pagination { limit: 1, offset: 1 } case. --- .../tests/repository/mod.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/packages/torrent-repository-benchmarking/tests/repository/mod.rs b/packages/torrent-repository-benchmarking/tests/repository/mod.rs index c3589ce68..ec7e68bae 100644 --- a/packages/torrent-repository-benchmarking/tests/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/repository/mod.rs @@ -364,12 +364,10 @@ async fn it_should_get_paginated( } // it should return the only the second entry if both the limit and the offset are one. - Pagination { limit: 1, offset: 1 } => { - if info_hashes.len() > 1 { - let page = repo.get_paginated(Some(&paginated)).await; - assert_eq!(page.len(), 1); - assert_eq!(page[0].0, info_hashes[1]); - } + Pagination { limit: 1, offset: 1 } if info_hashes.len() > 1 => { + let page = repo.get_paginated(Some(&paginated)).await; + assert_eq!(page.len(), 1); + assert_eq!(page[0].0, info_hashes[1]); } // the other cases are not yet tested. 
_ => {} From 7e322eb7bf766f2a6c5ee376b42deae21b28bcd9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Mar 2026 08:08:11 +0000 Subject: [PATCH 232/247] ci: upgrade actions/upload-artifact from v6 to v7 in generate_coverage_pr workflow --- .github/workflows/generate_coverage_pr.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/generate_coverage_pr.yaml b/.github/workflows/generate_coverage_pr.yaml index f762207cf..a3f97dbf2 100644 --- a/.github/workflows/generate_coverage_pr.yaml +++ b/.github/workflows/generate_coverage_pr.yaml @@ -59,13 +59,13 @@ jobs: # Triggered sub-workflow is not able to detect the original commit/PR which is available # in this workflow. - name: Store PR number - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: pr_number path: pr_number.txt - name: Store commit SHA - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: commit_sha path: commit_sha.txt @@ -74,7 +74,7 @@ jobs: # is executed by a different workflow `upload_coverage.yml`. The reason for this # split is because `on.pull_request` workflows don't have access to secrets. 
- name: Store coverage report in artifacts - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: codecov_report path: ./codecov.json From de471450fb2a555816b111cd59ae03dade2b6fcb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Mar 2026 18:14:57 +0000 Subject: [PATCH 233/247] fix: add sleep after HTTP server stop in health check test to avoid race condition --- packages/axum-health-check-api-server/tests/server/contract.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/axum-health-check-api-server/tests/server/contract.rs b/packages/axum-health-check-api-server/tests/server/contract.rs index 1d1ba3539..af1c0cff9 100644 --- a/packages/axum-health-check-api-server/tests/server/contract.rs +++ b/packages/axum-health-check-api-server/tests/server/contract.rs @@ -202,6 +202,9 @@ mod http { service.server.stop().await.expect("it should stop udp server"); + // Give the OS a moment to fully release the TCP port after the server stops. + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + { let config = configuration.health_check_api.clone(); let env = Started::new(&config.into(), registar).await; From 1228a2b986e41fa195fd5418a5fecd028add1524 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Apr 2026 19:07:53 +0100 Subject: [PATCH 234/247] chore(deps): update dependencies ``` Updating crates.io index Locking 104 packages to latest compatible versions Updating anstream v0.6.21 -> v1.0.0 Updating anstyle v1.0.13 -> v1.0.14 Updating anstyle-parse v0.2.7 -> v1.0.0 Updating arc-swap v1.8.2 -> v1.9.1 Updating astral-tokio-tar v0.5.6 -> v0.6.0 Updating aws-lc-rs v1.16.1 -> v1.16.2 Updating aws-lc-sys v0.38.0 -> v0.39.1 Updating bollard v0.20.1 -> v0.20.2 Updating borsh v1.6.0 -> v1.6.1 Updating borsh-derive v1.6.0 -> v1.6.1 Updating cc v1.2.56 -> v1.2.59 Updating clap v4.5.60 -> v4.6.0 Updating clap_builder v4.5.60 -> v4.6.0 Updating clap_derive v4.5.55 -> v4.6.0 Updating clap_lex v1.0.0 -> v1.1.0 Updating 
cmake v0.1.57 -> v0.1.58 Updating colorchoice v1.0.4 -> v1.0.5 Updating darling v0.21.3 -> v0.23.0 Updating darling_core v0.21.3 -> v0.23.0 Updating darling_macro v0.21.3 -> v0.23.0 Updating env_filter v1.0.0 -> v1.0.1 Updating env_logger v0.11.9 -> v0.11.10 Updating fastrand v2.3.0 -> v2.4.1 Updating fragile v2.0.1 -> v2.1.0 Updating getrandom v0.4.1 -> v0.4.2 Updating hyper v1.8.1 -> v1.9.0 Updating icu_collections v2.1.1 -> v2.2.0 Updating icu_locale_core v2.1.1 -> v2.2.0 Updating icu_normalizer v2.1.1 -> v2.2.0 Updating icu_normalizer_data v2.1.1 -> v2.2.0 Updating icu_properties v2.1.2 -> v2.2.0 Updating icu_properties_data v2.1.2 -> v2.2.0 Updating icu_provider v2.1.1 -> v2.2.0 Updating indexmap v2.13.0 -> v2.13.1 Updating iri-string v0.7.10 -> v0.7.12 Updating itoa v1.0.17 -> v1.0.18 Removing jni-sys v0.3.0 Adding jni-sys v0.3.1 Adding jni-sys v0.4.1 Adding jni-sys-macros v0.4.1 Updating js-sys v0.3.91 -> v0.3.94 Updating libc v0.2.182 -> v0.2.184 Updating libredox v0.1.14 -> v0.1.15 Updating libsqlite3-sys v0.36.0 -> v0.37.0 Updating libz-sys v1.1.24 -> v1.1.28 Updating litemap v0.8.1 -> v0.8.2 Updating local-ip-address v0.6.10 -> v0.6.11 Updating mio v1.1.1 -> v1.2.0 Updating num-conv v0.2.0 -> v0.2.1 Updating once_cell v1.21.3 -> v1.21.4 Updating openssl v0.10.75 -> v0.10.76 Updating openssl-sys v0.9.111 -> v0.9.112 Updating portable-atomic-util v0.2.5 -> v0.2.6 Updating potential_utf v0.1.4 -> v0.1.5 Updating proc-macro-crate v3.4.0 -> v3.5.0 Updating quinn-proto v0.11.13 -> v0.11.14 Updating quote v1.0.44 -> v1.0.45 Adding r-efi v6.0.0 Updating r2d2_sqlite v0.32.0 -> v0.33.0 Updating rusqlite v0.38.0 -> v0.39.0 Updating rust_decimal v1.40.0 -> v1.41.0 Updating rustc-hash v2.1.1 -> v2.1.2 Updating rustls-webpki v0.103.9 -> v0.103.10 Updating schannel v0.1.28 -> v0.1.29 Updating semver v1.0.27 -> v1.0.28 Updating serde_spanned v1.0.4 -> v1.1.1 Updating serde_with v3.17.0 -> v3.18.0 Updating serde_with_macros v3.17.0 -> v3.18.0 Updating simd-adler32 v0.3.8 
-> v0.3.9 Updating socket2 v0.6.2 -> v0.6.3 Updating tempfile v3.26.0 -> v3.27.0 Updating terminal_size v0.4.3 -> v0.4.4 Updating testcontainers v0.27.1 -> v0.27.2 Updating tinystr v0.8.2 -> v0.8.3 Updating tinyvec v1.10.0 -> v1.11.0 Updating tokio v1.49.0 -> v1.51.0 Updating tokio-macros v2.6.1 -> v2.7.0 Adding toml_datetime v1.1.1+spec-1.1.0 Updating toml_edit v0.23.10+spec-1.0.0 -> v0.25.10+spec-1.1.0 Updating toml_parser v1.0.9+spec-1.1.0 -> v1.1.2+spec-1.1.0 Updating toml_writer v1.0.6+spec-1.1.0 -> v1.1.1+spec-1.1.0 Updating tracing-subscriber v0.3.22 -> v0.3.23 Updating unicode-segmentation v1.12.0 -> v1.13.2 Updating ureq v3.2.0 -> v3.3.0 Updating ureq-proto v0.5.3 -> v0.6.0 Removing utf-8 v0.7.6 Adding utf8-zero v0.8.1 Updating uuid v1.21.0 -> v1.23.0 Updating wasm-bindgen v0.2.114 -> v0.2.117 Updating wasm-bindgen-futures v0.4.64 -> v0.4.67 Updating wasm-bindgen-macro v0.2.114 -> v0.2.117 Updating wasm-bindgen-macro-support v0.2.114 -> v0.2.117 Updating wasm-bindgen-shared v0.2.114 -> v0.2.117 Updating web-sys v0.3.91 -> v0.3.94 Removing winnow v0.7.14 Adding winnow v0.7.15 Adding winnow v1.0.1 Updating writeable v0.6.2 -> v0.6.3 Updating yoke v0.8.1 -> v0.8.2 Updating yoke-derive v0.8.1 -> v0.8.2 Updating zerocopy v0.8.40 -> v0.8.48 Updating zerocopy-derive v0.8.40 -> v0.8.48 Updating zerofrom v0.1.6 -> v0.1.7 Updating zerofrom-derive v0.1.6 -> v0.1.7 Updating zerotrie v0.2.3 -> v0.2.4 Updating zerovec v0.11.5 -> v0.11.6 Updating zerovec-derive v0.11.2 -> v0.11.3 note: pass `--verbose` to see 9 unchanged dependencies behind latest ``` --- Cargo.lock | 543 +++++++++++++++++++++++++++++------------------------ 1 file changed, 296 insertions(+), 247 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6894e2bcd..9e0911944 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -84,9 +84,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.21" +version = "1.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" +checksum = "824a212faf96e9acacdbd09febd34438f8f711fb84e09a8916013cd7815ca28d" dependencies = [ "anstyle", "anstyle-parse", @@ -99,15 +99,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" +checksum = "940b3a0ca603d1eade50a4846a2afffd5ef57a9feac2c0e2ec2e14f9ead76000" [[package]] name = "anstyle-parse" -version = "0.2.7" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" +checksum = "52ce7f38b242319f7cabaa6813055467063ecdc9d355bbb4ce0c68908cd8130e" dependencies = [ "utf8parse", ] @@ -175,9 +175,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.8.2" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9f3647c145568cec02c42054e07bdf9a5a698e15b466fb2341bfc393cd24aa5" +checksum = "6a3a1fd6f75306b68087b831f025c712524bcb19aad54e557b1129cfa0a2b207" dependencies = [ "rustversion", ] @@ -190,9 +190,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "astral-tokio-tar" -version = "0.5.6" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec179a06c1769b1e42e1e2cbe74c7dcdb3d6383c838454d063eaac5bbb7ebbe5" +checksum = "3c23f3af104b40a3430ccb90ed5f7bd877a8dc5c26fc92fde51a22b40890dcf9" dependencies = [ "filetime", "futures-core", @@ -397,9 +397,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.16.1" +version = "1.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"94bffc006df10ac2a68c83692d734a465f8ee6c5b384d8545a636f81d858f4bf" +checksum = "a054912289d18629dc78375ba2c3726a3afe3ff71b4edba9dedfca0e3446d1fc" dependencies = [ "aws-lc-sys", "zeroize", @@ -407,9 +407,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.38.0" +version = "0.39.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4321e568ed89bb5a7d291a7f37997c2c0df89809d7b6d12062c81ddb54aa782e" +checksum = "83a25cf98105baa966497416dbd42565ce3a8cf8dbfd59803ec9ad46f3126399" dependencies = [ "cc", "cmake", @@ -837,9 +837,9 @@ dependencies = [ [[package]] name = "bollard" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "227aa051deec8d16bd9c34605e7aaf153f240e35483dd42f6f78903847934738" +checksum = "ee04c4c84f1f811b017f2fbb7dd8815c976e7ca98593de9c1e2afad0f636bff4" dependencies = [ "async-stream", "base64 0.22.1", @@ -911,19 +911,20 @@ dependencies = [ [[package]] name = "borsh" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1da5ab77c1437701eeff7c88d968729e7766172279eab0676857b3d63af7a6f" +checksum = "cfd1e3f8955a5d7de9fab72fc8373fade9fb8a703968cb200ae3dc6cf08e185a" dependencies = [ "borsh-derive", + "bytes", "cfg_aliases", ] [[package]] name = "borsh-derive" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0686c856aa6aac0c4498f936d7d6a02df690f614c03e4d906d1018062b5c5e2c" +checksum = "bfcfdc083699101d5a7965e49925975f2f55060f94f9a05e7187be95d530ca59" dependencies = [ "once_cell", "proc-macro-crate", @@ -1040,9 +1041,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.56" +version = "1.2.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" +checksum = "b7a4d3ec6524d28a329fc53654bbadc9bdd7b0431f5d65f1a56ffb28a1ee5283" 
dependencies = [ "find-msvc-tools", "jobserver", @@ -1150,9 +1151,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.60" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" +checksum = "b193af5b67834b676abd72466a96c1024e6a6ad978a1f484bd90b85c94041351" dependencies = [ "clap_builder", "clap_derive", @@ -1160,9 +1161,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.60" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" +checksum = "714a53001bf66416adb0e2ef5ac857140e7dc3a0c48fb28b2f10762fc4b5069f" dependencies = [ "anstream", "anstyle", @@ -1172,9 +1173,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.55" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" +checksum = "1110bd8a634a1ab8cb04345d8d878267d57c3cf1b38d91b71af6686408bbca6a" dependencies = [ "heck", "proc-macro2", @@ -1184,24 +1185,24 @@ dependencies = [ [[package]] name = "clap_lex" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" +checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9" [[package]] name = "cmake" -version = "0.1.57" +version = "0.1.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" +checksum = "c0f78a02292a74a88ac736019ab962ece0bc380e3f977bf72e376c5d78ff0678" dependencies = [ "cc", ] [[package]] name = "colorchoice" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570" [[package]] name = "combine" @@ -1485,12 +1486,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.21.3" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d" dependencies = [ - "darling_core 0.21.3", - "darling_macro 0.21.3", + "darling_core 0.23.0", + "darling_macro 0.23.0", ] [[package]] @@ -1509,11 +1510,10 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.21.3" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0" dependencies = [ - "fnv", "ident_case", "proc-macro2", "quote", @@ -1534,11 +1534,11 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.21.3" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" dependencies = [ - "darling_core 0.21.3", + "darling_core 0.23.0", "quote", "syn 2.0.117", ] @@ -1705,9 +1705,9 @@ dependencies = [ [[package]] name = "env_filter" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a1c3cc8e57274ec99de65301228b537f1e4eedc1b8e0f9411c6caac8ae7308f" +checksum = "32e90c2accc4b07a8456ea0debdc2e7587bdd890680d71173a15d4ae604f6eef" dependencies = [ "log", "regex", @@ -1715,9 +1715,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.9" +version = "0.11.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2daee4ea451f429a58296525ddf28b45a3b64f1acf6587e2067437bb11e218d" +checksum = "0621c04f2196ac3f488dd583365b9c09be011a4ab8b9f37248ffcc8f6198b56a" dependencies = [ "env_filter", "log", @@ -1790,9 +1790,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.3.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +checksum = "9f1f227452a390804cdb637b74a86990f2a7d7ba4b7d5693aac9b4dd6defd8d6" [[package]] name = "ferroid" @@ -1913,9 +1913,12 @@ dependencies = [ [[package]] name = "fragile" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619" +checksum = "8878864ba14bb86e818a412bfd6f18f9eabd4ec0f008a28e8f7eb61db532fcf9" +dependencies = [ + "futures-core", +] [[package]] name = "frunk" @@ -2134,20 +2137,20 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "r-efi", + "r-efi 5.3.0", "wasip2", "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" dependencies = [ "cfg-if", "libc", - "r-efi", + "r-efi 6.0.0", "rand_core 0.10.0", "wasip2", "wasip3", @@ -2201,7 +2204,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.13.0", + "indexmap 2.13.1", "slab", "tokio", "tokio-util", @@ -2216,7 +2219,7 @@ checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", - "zerocopy 0.8.40", + "zerocopy 0.8.48", ] [[package]] @@ -2343,9 +2346,9 @@ checksum = 
"df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +checksum = "6299f016b246a94207e63da54dbe807655bf9e00044f73ded42c3ac5305fbcca" dependencies = [ "atomic-waker", "bytes", @@ -2358,7 +2361,6 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "pin-utils", "smallvec", "tokio", "want", @@ -2425,7 +2427,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.6.2", + "socket2 0.6.3", "system-configuration", "tokio", "tower-service", @@ -2474,12 +2476,13 @@ dependencies = [ [[package]] name = "icu_collections" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +checksum = "2984d1cd16c883d7935b9e07e44071dca8d917fd52ecc02c04d5fa0b5a3f191c" dependencies = [ "displaydoc", "potential_utf", + "utf8_iter", "yoke", "zerofrom", "zerovec", @@ -2487,9 +2490,9 @@ dependencies = [ [[package]] name = "icu_locale_core" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +checksum = "92219b62b3e2b4d88ac5119f8904c10f8f61bf7e95b640d25ba3075e6cac2c29" dependencies = [ "displaydoc", "litemap", @@ -2500,9 +2503,9 @@ dependencies = [ [[package]] name = "icu_normalizer" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +checksum = "c56e5ee99d6e3d33bd91c5d85458b6005a22140021cc324cea84dd0e72cff3b4" dependencies = [ "icu_collections", "icu_normalizer_data", @@ -2514,15 +2517,15 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = 
"2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" +checksum = "da3be0ae77ea334f4da67c12f149704f19f81d1adf7c51cf482943e84a2bad38" [[package]] name = "icu_properties" -version = "2.1.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" +checksum = "bee3b67d0ea5c2cca5003417989af8996f8604e34fb9ddf96208a033901e70de" dependencies = [ "icu_collections", "icu_locale_core", @@ -2534,15 +2537,15 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "2.1.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" +checksum = "8e2bbb201e0c04f7b4b3e14382af113e17ba4f63e2c9d2ee626b720cbce54a14" [[package]] name = "icu_provider" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +checksum = "139c4cf31c8b5f33d7e199446eff9c1e02decfc2f0eec2c8d71f65befa45b421" dependencies = [ "displaydoc", "icu_locale_core", @@ -2599,9 +2602,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.13.0" +version = "2.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" +checksum = "45a8a2b9cb3e0b0c1803dbb0758ffac5de2f425b23c28f518faabd9d805342ff" dependencies = [ "equivalent", "hashbrown 0.16.1", @@ -2641,9 +2644,9 @@ checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" [[package]] name = "iri-string" -version = "0.7.10" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" +checksum 
= "25e659a4bb38e810ebc252e53b5814ff908a8c58c2a9ce2fae1bbec24cbf4e20" dependencies = [ "memchr", "serde", @@ -2701,9 +2704,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" [[package]] name = "jni" @@ -2714,7 +2717,7 @@ dependencies = [ "cesu8", "cfg-if", "combine", - "jni-sys", + "jni-sys 0.3.1", "log", "thiserror 1.0.69", "walkdir", @@ -2723,9 +2726,31 @@ dependencies = [ [[package]] name = "jni-sys" -version = "0.3.0" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41a652e1f9b6e0275df1f15b32661cf0d4b78d4d87ddec5e0c3c20f097433258" +dependencies = [ + "jni-sys 0.4.1", +] + +[[package]] +name = "jni-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6377a88cb3910bee9b0fa88d4f42e1d2da8e79915598f65fb0c7ee14c878af2" +dependencies = [ + "jni-sys-macros", +] + +[[package]] +name = "jni-sys-macros" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" +checksum = "38c0b942f458fe50cdac086d2f946512305e5631e720728f2a61aabcd47a6264" +dependencies = [ + "quote", + "syn 2.0.117", +] [[package]] name = "jobserver" @@ -2739,10 +2764,12 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.91" +version = "0.3.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c" +checksum = "2e04e2ef80ce82e13552136fabeef8a5ed1f985a96805761cbb9a2c34e7664d9" dependencies = [ + "cfg-if", + "futures-util", "once_cell", "wasm-bindgen", ] @@ -2770,9 +2797,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" 
[[package]] name = "libc" -version = "0.2.182" +version = "0.2.184" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" +checksum = "48f5d2a454e16a5ea0f4ced81bd44e4cfc7bd3a507b61887c99fd3538b28e4af" [[package]] name = "libloading" @@ -2792,9 +2819,9 @@ checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "libredox" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a" +checksum = "7ddbf48fd451246b1f8c2610bd3b4ac0cc6e149d89832867093ab69a17194f08" dependencies = [ "bitflags", "libc", @@ -2804,9 +2831,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b4103cffefa72eb8428cb6b47d6627161e51c2739fc5e3b734584157bc642a" +checksum = "b1f111c8c41e7c61a49cd34e44c7619462967221a6443b0ec299e0ac30cfb9b1" dependencies = [ "cc", "pkg-config", @@ -2815,9 +2842,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.24" +version = "1.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4735e9cbde5aac84a5ce588f6b23a90b9b0b528f6c5a8db8a4aff300463a0839" +checksum = "fc3a226e576f50782b3305c5ccf458698f92798987f551c6a02efe8276721e22" dependencies = [ "cc", "pkg-config", @@ -2832,15 +2859,15 @@ checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" [[package]] name = "litemap" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" +checksum = "92daf443525c4cce67b150400bc2316076100ce0b3686209eb8cf3c31612e6f0" [[package]] name = "local-ip-address" -version = "0.6.10" +version = "0.6.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "79ef8c257c92ade496781a32a581d43e3d512cf8ce714ecf04ea80f93ed0ff4a" +checksum = "d4a59a0cb1c7f84471ad5cd38d768c2a29390d17f1ff2827cdf49bc53e8ac70b" dependencies = [ "libc", "neli", @@ -2946,9 +2973,9 @@ dependencies = [ [[package]] name = "mio" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1" dependencies = [ "libc", "wasi", @@ -3187,9 +3214,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" +checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967" [[package]] name = "num-integer" @@ -3242,9 +3269,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.21.3" +version = "1.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" [[package]] name = "once_cell_polyfill" @@ -3260,9 +3287,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "openssl" -version = "0.10.75" +version = "0.10.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" +checksum = "951c002c75e16ea2c65b8c7e4d3d51d5530d8dfa7d060b4776828c88cfb18ecf" dependencies = [ "bitflags", "cfg-if", @@ -3292,9 +3319,9 @@ checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" [[package]] name = "openssl-sys" -version = "0.9.111" +version = "0.9.112" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +checksum = "57d55af3b3e226502be1526dfdba67ab0e9c96fc293004e79576b2b9edb0dbdb" dependencies = [ "cc", "libc", @@ -3554,18 +3581,18 @@ checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" [[package]] name = "portable-atomic-util" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a9db96d7fa8782dd8c15ce32ffe8680bbd1e978a43bf51a34d39483540495f5" +checksum = "091397be61a01d4be58e7841595bd4bfedb15f1cd54977d79b8271e94ed799a3" dependencies = [ "portable-atomic", ] [[package]] name = "potential_utf" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +checksum = "0103b1cef7ec0cf76490e969665504990193874ea05c85ff9bab8b911d0a0564" dependencies = [ "zerovec", ] @@ -3582,7 +3609,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.40", + "zerocopy 0.8.48", ] [[package]] @@ -3633,11 +3660,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" +checksum = "e67ba7e9b2b56446f1d419b1d807906278ffa1a658a8a5d8a39dcb1f5a78614f" dependencies = [ - "toml_edit 0.23.10+spec-1.0.0", + "toml_edit 0.25.10+spec-1.1.0", ] [[package]] @@ -3760,7 +3787,7 @@ dependencies = [ "quinn-udp", "rustc-hash", "rustls", - "socket2 0.6.2", + "socket2 0.6.3", "thiserror 2.0.18", "tokio", "tracing", @@ -3769,9 +3796,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.13" +version = "0.11.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098" dependencies = [ "aws-lc-rs", "bytes", @@ -3798,16 +3825,16 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.6.2", + "socket2 0.6.3", "tracing", "windows-sys 0.60.2", ] [[package]] name = "quote" -version = "1.0.44" +version = "1.0.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" +checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" dependencies = [ "proc-macro2", ] @@ -3818,6 +3845,12 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" + [[package]] name = "r2d2" version = "0.8.10" @@ -3841,9 +3874,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ebd03c29250cdf191da93a35118b4567c2ef0eacab54f65e058d6f4c9965f6" +checksum = "5576df16239e4e422c4835c8ed00be806d4491855c7847dba60b7aa8408b469b" dependencies = [ "r2d2", "rusqlite", @@ -3884,7 +3917,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc266eb313df6c5c09c1c7b1fbe2510961e5bcd3add930c1e31f7ed9da0feff8" dependencies = [ "chacha20", - "getrandom 0.4.1", + "getrandom 0.4.2", "rand_core 0.10.0", ] @@ -4200,9 +4233,9 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.38.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f1c93dd1c9683b438c392c492109cb702b8090b2bfc8fed6f6e4eb4523f17af3" +checksum = "a0d2b0146dd9661bf67bb107c0bb2a55064d556eeb3fc314151b957f313bcd4e" dependencies = [ "bitflags", "fallible-iterator", @@ -4215,9 +4248,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.40.0" +version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" +checksum = "2ce901f9a19d251159075a4c37af514c3b8ef99c22e02dd8c19161cf397ee94a" dependencies = [ "arrayvec", "borsh", @@ -4227,6 +4260,7 @@ dependencies = [ "rkyv", "serde", "serde_json", + "wasm-bindgen", ] [[package]] @@ -4237,9 +4271,9 @@ checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" [[package]] name = "rustc-hash" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" +checksum = "94300abf3f1ae2e2b8ffb7b58043de3d399c73fa6f4b73826402a5c457614dbe" [[package]] name = "rustc_version" @@ -4330,9 +4364,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" -version = "0.103.9" +version = "0.103.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" +checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" dependencies = [ "aws-lc-rs", "ring", @@ -4369,9 +4403,9 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.28" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" +checksum = "91c1b7e4904c873ef0710c1f407dde2e6287de2bebc1bbbf7d430bb7cbffd939" dependencies = [ "windows-sys 0.61.2", ] @@ -4446,9 +4480,9 @@ 
dependencies = [ [[package]] name = "semver" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" +checksum = "8a7852d02fc848982e0c167ef163aaff9cd91dc640ba85e263cb1ce46fae51cd" [[package]] name = "serde" @@ -4507,7 +4541,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2f2d7ff8a2140333718bb329f5c40fc5f0865b84c426183ce14c97d2ab8154f" dependencies = [ "form_urlencoded", - "indexmap 2.13.0", + "indexmap 2.13.1", "itoa", "ryu", "serde_core", @@ -4519,7 +4553,7 @@ version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ - "indexmap 2.13.0", + "indexmap 2.13.1", "itoa", "memchr", "serde", @@ -4560,9 +4594,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "1.0.4" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" +checksum = "6662b5879511e06e8999a8a235d848113e942c9124f211511b16466ee2995f26" dependencies = [ "serde_core", ] @@ -4581,15 +4615,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.17.0" +version = "3.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "381b283ce7bc6b476d903296fb59d0d36633652b633b27f64db4fb46dcbfc3b9" +checksum = "dd5414fad8e6907dbdd5bc441a50ae8d6e26151a03b1de04d89a5576de61d01f" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.13.0", + "indexmap 2.13.1", "schemars 0.9.0", "schemars 1.2.1", "serde_core", @@ -4600,11 +4634,11 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.17.0" +version = "3.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a6d4e30573c8cb306ed6ab1dca8423eec9a463ea0e155f45399455e0368b27e0" +checksum = "d3db8978e608f1fe7357e211969fd9abdcae80bac1ba7a3369bb7eb6b404eb65" dependencies = [ - "darling 0.21.3", + "darling 0.23.0", "proc-macro2", "quote", "syn 2.0.117", @@ -4659,9 +4693,9 @@ dependencies = [ [[package]] name = "simd-adler32" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" +checksum = "703d5c7ef118737c72f1af64ad2f6f8c5e1921f818cdcb97b8fe6fc69bf66214" [[package]] name = "simdutf8" @@ -4699,12 +4733,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f4aa3ad99f2088c990dfa82d367e19cb29268ed67c574d10d0a4bfe71f07e0" +checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e" dependencies = [ "libc", - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] @@ -4885,12 +4919,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.26.0" +version = "3.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82a72c767771b47409d2345987fda8628641887d5466101319899796367354a0" +checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" dependencies = [ "fastrand", - "getrandom 0.4.1", + "getrandom 0.4.2", "once_cell", "rustix", "windows-sys 0.61.2", @@ -4907,12 +4941,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0" +checksum = "230a1b821ccbd75b185820a1f1ff7b14d21da1e442e22c0863ea5f08771a8874" dependencies = [ "rustix", - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] @@ -4923,9 +4957,9 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" 
[[package]] name = "testcontainers" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1c0624faaa317c56d6d19136580be889677259caf5c897941c6f446b4655068" +checksum = "0bd36b06a2a6c0c3c81a83be1ab05fe86460d054d4d51bf513bc56b3e15bdc22" dependencies = [ "astral-tokio-tar", "async-trait", @@ -5044,9 +5078,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +checksum = "c8323304221c2a851516f22236c5722a72eaa19749016521d6dff0824447d96d" dependencies = [ "displaydoc", "zerovec", @@ -5064,9 +5098,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +checksum = "3e61e67053d25a4e82c844e8424039d9745781b3fc4f32b8d55ed50f5f667ef3" dependencies = [ "tinyvec_macros", ] @@ -5079,25 +5113,25 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.49.0" +version = "1.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" +checksum = "2bd1c4c0fc4a7ab90fc15ef6daaa3ec3b893f004f915f2392557ed23237820cd" dependencies = [ "bytes", "libc", "mio", "pin-project-lite", "signal-hook-registry", - "socket2 0.6.2", + "socket2 0.6.3", "tokio-macros", "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.6.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c" +checksum = "385a6cb71ab9ab790c5fe8d67f1645e6c450a7ce006a33de03daa956cf70a496" dependencies = [ "proc-macro2", "quote", @@ -5156,13 
+5190,13 @@ version = "0.9.12+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" dependencies = [ - "indexmap 2.13.0", + "indexmap 2.13.1", "serde_core", - "serde_spanned 1.0.4", + "serde_spanned 1.1.1", "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "toml_writer", - "winnow", + "winnow 0.7.15", ] [[package]] @@ -5183,39 +5217,48 @@ dependencies = [ "serde_core", ] +[[package]] +name = "toml_datetime" +version = "1.1.1+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3165f65f62e28e0115a00b2ebdd37eb6f3b641855f9d636d3cd4103767159ad7" +dependencies = [ + "serde_core", +] + [[package]] name = "toml_edit" version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.13.0", + "indexmap 2.13.1", "serde", "serde_spanned 0.6.9", "toml_datetime 0.6.11", "toml_write", - "winnow", + "winnow 0.7.15", ] [[package]] name = "toml_edit" -version = "0.23.10+spec-1.0.0" +version = "0.25.10+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" +checksum = "a82418ca169e235e6c399a84e395ab6debeb3bc90edc959bf0f48647c6a32d1b" dependencies = [ - "indexmap 2.13.0", - "toml_datetime 0.7.5+spec-1.1.0", + "indexmap 2.13.1", + "toml_datetime 1.1.1+spec-1.1.0", "toml_parser", - "winnow", + "winnow 1.0.1", ] [[package]] name = "toml_parser" -version = "1.0.9+spec-1.1.0" +version = "1.1.2+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" +checksum = "a2abe9b86193656635d2411dc43050282ca48aa31c2451210f4202550afb7526" dependencies = [ - "winnow", + "winnow 1.0.1", ] [[package]] @@ -5226,9 +5269,9 @@ checksum = 
"5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "toml_writer" -version = "1.0.6+spec-1.1.0" +version = "1.1.1+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" +checksum = "756daf9b1013ebe47a8776667b466417e2d4c5679d441c26230efd9ef78692db" [[package]] name = "tonic" @@ -5249,7 +5292,7 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "socket2 0.6.2", + "socket2 0.6.3", "sync_wrapper", "tokio", "tokio-stream", @@ -5693,7 +5736,7 @@ checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", - "indexmap 2.13.0", + "indexmap 2.13.1", "pin-project-lite", "slab", "sync_wrapper", @@ -5796,9 +5839,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.22" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +checksum = "cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319" dependencies = [ "nu-ansi-term", "serde", @@ -5857,9 +5900,9 @@ checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-segmentation" -version = "1.12.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" +checksum = "9629274872b2bfaf8d66f5f15725007f635594914870f65218920345aa11aa8c" [[package]] name = "unicode-width" @@ -5887,9 +5930,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc97a28575b85cfedf2a7e7d3cc64b3e11bd8ac766666318003abbacc7a21fc" +checksum = 
"dea7109cdcd5864d4eeb1b58a1648dc9bf520360d7af16ec26d0a9354bafcfc0" dependencies = [ "base64 0.22.1", "log", @@ -5897,14 +5940,14 @@ dependencies = [ "rustls", "rustls-pki-types", "ureq-proto", - "utf-8", + "utf8-zero", ] [[package]] name = "ureq-proto" -version = "0.5.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d81f9efa9df032be5934a46a068815a10a042b494b6a58cb0a1a97bb5467ed6f" +checksum = "e994ba84b0bd1b1b0cf92878b7ef898a5c1760108fe7b6010327e274917a808c" dependencies = [ "base64 0.22.1", "http", @@ -5926,10 +5969,10 @@ dependencies = [ ] [[package]] -name = "utf-8" -version = "0.7.6" +name = "utf8-zero" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +checksum = "b8c0a043c9540bae7c578c88f91dda8bd82e59ae27c21baca69c8b191aaf5a6e" [[package]] name = "utf8_iter" @@ -5945,13 +5988,13 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.21.0" +version = "1.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b672338555252d43fd2240c714dc444b8c6fb0a5c5335e65a07bba7742735ddb" +checksum = "5ac8b6f42ead25368cf5b098aeb3dc8a1a2c05a3eee8a9a1a68c640edbfc79d9" dependencies = [ - "getrandom 0.4.1", + "getrandom 0.4.2", "js-sys", - "rand 0.9.2", + "rand 0.10.0", "wasm-bindgen", ] @@ -6024,36 +6067,33 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.114" +version = "0.2.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e" +checksum = "0551fc1bb415591e3372d0bc4780db7e587d84e2a7e79da121051c5c4b89d0b0" dependencies = [ "cfg-if", "once_cell", "rustversion", + "serde", "wasm-bindgen-macro", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.64" +version = "0.4.67" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8" +checksum = "03623de6905b7206edd0a75f69f747f134b7f0a2323392d664448bf2d3c5d87e" dependencies = [ - "cfg-if", - "futures-util", "js-sys", - "once_cell", "wasm-bindgen", - "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.114" +version = "0.2.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6" +checksum = "7fbdf9a35adf44786aecd5ff89b4563a90325f9da0923236f6104e603c7e86be" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6061,9 +6101,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.114" +version = "0.2.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3" +checksum = "dca9693ef2bab6d4e6707234500350d8dad079eb508dca05530c85dc3a529ff2" dependencies = [ "bumpalo", "proc-macro2", @@ -6074,9 +6114,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.114" +version = "0.2.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16" +checksum = "39129a682a6d2d841b6c429d0c51e5cb0ed1a03829d8b3d1e69a011e62cb3d3b" dependencies = [ "unicode-ident", ] @@ -6098,7 +6138,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" dependencies = [ "anyhow", - "indexmap 2.13.0", + "indexmap 2.13.1", "wasm-encoder", "wasmparser", ] @@ -6111,15 +6151,15 @@ checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" dependencies = [ "bitflags", "hashbrown 0.15.5", - "indexmap 2.13.0", + "indexmap 2.13.1", "semver", ] [[package]] name = "web-sys" -version = "0.3.91" +version = "0.3.94" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9" +checksum = "cd70027e39b12f0849461e08ffc50b9cd7688d942c1c8e3c7b22273236b4dd0a" dependencies = [ "js-sys", "wasm-bindgen", @@ -6469,9 +6509,18 @@ checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" -version = "0.7.14" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df79d97927682d2fd8adb29682d1140b343be4ac0f08fd68b7765d9c059d3945" +dependencies = [ + "memchr", +] + +[[package]] +name = "winnow" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" +checksum = "09dac053f1cd375980747450bfc7250c264eaae0583872e845c0c7cd578872b5" dependencies = [ "memchr", ] @@ -6504,7 +6553,7 @@ checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" dependencies = [ "anyhow", "heck", - "indexmap 2.13.0", + "indexmap 2.13.1", "prettyplease", "syn 2.0.117", "wasm-metadata", @@ -6535,7 +6584,7 @@ checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" dependencies = [ "anyhow", "bitflags", - "indexmap 2.13.0", + "indexmap 2.13.1", "log", "serde", "serde_derive", @@ -6554,7 +6603,7 @@ checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" dependencies = [ "anyhow", "id-arena", - "indexmap 2.13.0", + "indexmap 2.13.1", "log", "semver", "serde", @@ -6566,9 +6615,9 @@ dependencies = [ [[package]] name = "writeable" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" +checksum = "1ffae5123b2d3fc086436f8834ae3ab053a283cfac8fe0a0b8eaae044768a4c4" [[package]] name = "wyz" @@ -6597,9 +6646,9 @@ checksum = 
"cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +checksum = "abe8c5fda708d9ca3df187cae8bfb9ceda00dd96231bed36e445a1a48e66f9ca" dependencies = [ "stable_deref_trait", "yoke-derive", @@ -6608,9 +6657,9 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +checksum = "de844c262c8848816172cef550288e7dc6c7b7814b4ee56b3e1553f275f1858e" dependencies = [ "proc-macro2", "quote", @@ -6630,11 +6679,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.40" +version = "0.8.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a789c6e490b576db9f7e6b6d661bcc9799f7c0ac8352f56ea20193b2681532e5" +checksum = "eed437bf9d6692032087e337407a86f04cd8d6a16a37199ed57949d415bd68e9" dependencies = [ - "zerocopy-derive 0.8.40", + "zerocopy-derive 0.8.48", ] [[package]] @@ -6650,9 +6699,9 @@ dependencies = [ [[package]] name = "zerocopy-derive" -version = "0.8.40" +version = "0.8.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f65c489a7071a749c849713807783f70672b28094011623e200cb86dcb835953" +checksum = "70e3cd084b1788766f53af483dd21f93881ff30d7320490ec3ef7526d203bad4" dependencies = [ "proc-macro2", "quote", @@ -6661,18 +6710,18 @@ dependencies = [ [[package]] name = "zerofrom" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +checksum = "69faa1f2a1ea75661980b013019ed6687ed0e83d069bc1114e2cc74c6c04c4df" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version 
= "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +checksum = "11532158c46691caf0f2593ea8358fed6bbf68a0315e80aae9bd41fbade684a1" dependencies = [ "proc-macro2", "quote", @@ -6688,9 +6737,9 @@ checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" [[package]] name = "zerotrie" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +checksum = "0f9152d31db0792fa83f70fb2f83148effb5c1f5b8c7686c3459e361d9bc20bf" dependencies = [ "displaydoc", "yoke", @@ -6699,9 +6748,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +checksum = "90f911cbc359ab6af17377d242225f4d75119aec87ea711a880987b18cd7b239" dependencies = [ "yoke", "zerofrom", @@ -6710,9 +6759,9 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +checksum = "625dc425cab0dca6dc3c3319506e6593dcb08a9f387ea3b284dbd52a92c40555" dependencies = [ "proc-macro2", "quote", From 48e9606219dd27e69d1db8620be3dd754cb7a8c2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 09:00:06 +0100 Subject: [PATCH 235/247] refactor: extract project dictionary from cSpell.json Move the project-specific word list from the inline 'words' array in cSpell.json into a dedicated project-words.txt dictionary file, following the same pattern used in other Torrust organisation repositories. 
Update packages/metrics/cSpell.json to reference the shared dictionary instead of maintaining its own inline word list. Closes #1484 --- cSpell.json | 217 +++-------------------------------- packages/metrics/cSpell.json | 26 ++--- project-words.txt | 199 ++++++++++++++++++++++++++++++++ 3 files changed, 228 insertions(+), 214 deletions(-) create mode 100644 project-words.txt diff --git a/cSpell.json b/cSpell.json index 81421e050..43eb391d3 100644 --- a/cSpell.json +++ b/cSpell.json @@ -1,208 +1,23 @@ { - "words": [ - "Addrs", - "adduser", - "alekitto", - "appuser", - "Arvid", - "ASMS", - "asyn", - "autoclean", - "AUTOINCREMENT", - "automock", - "Avicora", - "Azureus", - "bdecode", - "bencode", - "bencoded", - "bencoding", - "beps", - "binascii", - "binstall", - "Bitflu", - "bools", - "Bragilevsky", - "bufs", - "buildid", - "Buildx", - "byteorder", - "callgrind", - "camino", - "canonicalize", - "canonicalized", - "certbot", - "chrono", - "Cinstrument", - "ciphertext", - "clippy", - "cloneable", - "codecov", - "codegen", - "completei", - "Condvar", - "connectionless", - "Containerfile", - "conv", - "curr", - "cvar", - "Cyberneering", - "dashmap", - "datagram", - "datetime", - "debuginfo", - "Deque", - "Dijke", - "distroless", - "dockerhub", - "downloadedi", - "dtolnay", - "elif", - "endianness", - "Eray", - "filesd", - "flamegraph", - "formatjson", - "Freebox", - "Frostegård", - "gecos", - "Gibibytes", - "Grcov", - "hasher", - "healthcheck", - "heaptrack", - "hexlify", - "hlocalhost", - "Hydranode", - "hyperthread", - "Icelake", - "iiiiiiiiiiiiiiiiiiiid", - "imdl", - "impls", - "incompletei", - "infohash", - "infohashes", - "infoschema", - "Intermodal", - "intervali", - "Joakim", - "kallsyms", - "Karatay", - "kcachegrind", - "kexec", - "keyout", - "Kibibytes", - "kptr", - "lcov", - "leecher", - "leechers", - "libsqlite", - "libtorrent", - "libz", - "LOGNAME", - "Lphant", - "matchmakes", - "Mebibytes", - "metainfo", - "middlewares", - "misresolved", - "mockall", - 
"multimap", - "myacicontext", - "ñaca", - "Naim", - "nanos", - "newkey", - "nextest", - "nocapture", - "nologin", - "nonroot", - "Norberg", - "numwant", - "nvCFlJCq7fz7Qx6KoKTDiMZvns8l5Kw7", - "oneshot", - "ostr", - "Pando", - "peekable", - "peerlist", - "programatik", - "proot", - "proto", - "Quickstart", - "Radeon", - "Rakshasa", - "Rasterbar", - "realpath", - "reannounce", - "Registar", - "repr", - "reqs", - "reqwest", - "rerequests", - "ringbuf", - "ringsize", - "rngs", - "rosegment", - "routable", - "rstest", - "rusqlite", - "rustc", - "RUSTDOCFLAGS", - "RUSTFLAGS", - "rustfmt", - "Rustls", - "Ryzen", - "Seedable", - "serde", - "Shareaza", - "sharktorrent", - "SHLVL", - "skiplist", - "slowloris", - "socketaddr", - "sqllite", - "subsec", - "Swatinem", - "Swiftbit", - "taiki", - "tdyne", - "Tebibytes", - "tempfile", - "testcontainers", - "thiserror", - "tlsv", - "Torrentstorm", - "torrust", - "torrustracker", - "trackerid", - "Trackon", - "typenum", - "udpv", - "Unamed", - "underflows", - "Unsendable", - "untuple", - "uroot", - "Vagaa", - "valgrind", - "Vitaly", - "vmlinux", - "Vuze", - "Weidendorfer", - "Werror", - "whitespaces", - "Xacrimon", - "XBTT", - "Xdebug", - "Xeon", - "Xtorrent", - "Xunlei", - "xxxxxxxxxxxxxxxxxxxxd", - "yyyyyyyyyyyyyyyyyyyyd", - "zerocopy" + "$schema": "https://raw.githubusercontent.com/streetsidesoftware/cspell/main/cspell.schema.json", + "version": "0.2", + "dictionaryDefinitions": [ + { + "name": "project-words", + "path": "./project-words.txt", + "addWords": true + } + ], + "dictionaries": [ + "project-words" ], "enableFiletypes": [ "dockerfile", "shellscript", "toml" + ], + "ignorePaths": [ + "target", + "/project-words.txt" ] -} +} \ No newline at end of file diff --git a/packages/metrics/cSpell.json b/packages/metrics/cSpell.json index f04cce9e3..8f5002833 100644 --- a/packages/metrics/cSpell.json +++ b/packages/metrics/cSpell.json @@ -1,21 +1,21 @@ { - "words": [ - "cloneable", - "formatjson", - "Gibibytes", - "Kibibytes", - 
"Mebibytes", - "ñaca", - "println", - "rstest", - "serde", - "subsec", - "Tebibytes", - "thiserror" + "$schema": "https://raw.githubusercontent.com/streetsidesoftware/cspell/main/cspell.schema.json", + "version": "0.2", + "dictionaryDefinitions": [ + { + "name": "project-words", + "path": "../../project-words.txt", + "addWords": true + } ], + "dictionaries": ["project-words"], "enableFiletypes": [ "dockerfile", "shellscript", "toml" + ], + "ignorePaths": [ + "target", + "/project-words.txt" ] } diff --git a/project-words.txt b/project-words.txt new file mode 100644 index 000000000..c698eea9c --- /dev/null +++ b/project-words.txt @@ -0,0 +1,199 @@ +Addrs +adduser +alekitto +appuser +Arvid +ASMS +asyn +autoclean +AUTOINCREMENT +automock +Avicora +Azureus +bdecode +bencode +bencoded +bencoding +beps +binascii +binstall +Bitflu +bools +Bragilevsky +bufs +buildid +Buildx +byteorder +callgrind +camino +canonicalize +canonicalized +certbot +chrono +Cinstrument +ciphertext +clippy +cloneable +codecov +codegen +completei +Condvar +connectionless +Containerfile +conv +curr +cvar +Cyberneering +dashmap +datagram +datetime +debuginfo +Deque +Dijke +distroless +dockerhub +downloadedi +dtolnay +elif +endianness +Eray +filesd +flamegraph +formatjson +Freebox +Frostegård +gecos +Gibibytes +Grcov +hasher +healthcheck +heaptrack +hexlify +hlocalhost +Hydranode +hyperthread +Icelake +iiiiiiiiiiiiiiiiiiiid +imdl +impls +incompletei +infohash +infohashes +infoschema +Intermodal +intervali +Joakim +kallsyms +Karatay +kcachegrind +kexec +keyout +Kibibytes +kptr +lcov +leecher +leechers +libsqlite +libtorrent +libz +LOGNAME +Lphant +matchmakes +Mebibytes +metainfo +middlewares +misresolved +mockall +multimap +myacicontext +ñaca +Naim +nanos +newkey +nextest +nocapture +nologin +nonroot +Norberg +numwant +nvCFlJCq7fz7Qx6KoKTDiMZvns8l5Kw7 +oneshot +ostr +Pando +peekable +peerlist +programatik +proot +proto +Quickstart +Radeon +Rakshasa +Rasterbar +realpath +reannounce +Registar +repr +reqs 
+reqwest +rerequests +ringbuf +ringsize +rngs +rosegment +routable +rstest +rusqlite +rustc +RUSTDOCFLAGS +RUSTFLAGS +rustfmt +Rustls +Ryzen +Seedable +serde +Shareaza +sharktorrent +SHLVL +skiplist +slowloris +socketaddr +sqllite +subsec +Swatinem +Swiftbit +taiki +tdyne +Tebibytes +tempfile +testcontainers +thiserror +tlsv +Torrentstorm +torrust +torrustracker +trackerid +Trackon +typenum +udpv +Unamed +underflows +Unsendable +untuple +uroot +Vagaa +valgrind +Vitaly +vmlinux +Vuze +Weidendorfer +Werror +whitespaces +Xacrimon +XBTT +Xdebug +Xeon +Xtorrent +Xunlei +xxxxxxxxxxxxxxxxxxxxd +yyyyyyyyyyyyyyyyyyyyd +zerocopy From c88f66cd42c733fafc5b7b4afeb862b7b3b28329 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Apr 2026 09:19:40 +0000 Subject: [PATCH 236/247] chore(deps): bump docker/login-action from 3 to 4 Bumps [docker/login-action](https://github.com/docker/login-action) from 3 to 4. - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/v3...v4) --- updated-dependencies: - dependency-name: docker/login-action dependency-version: '4' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/container.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 7416df71e..2f2b0780c 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -117,7 +117,7 @@ jobs: - id: login name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: username: ${{ secrets.DOCKER_HUB_USERNAME }} password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} @@ -158,7 +158,7 @@ jobs: - id: login name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: username: ${{ secrets.DOCKER_HUB_USERNAME }} password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} From 504135d009b2c16768ed13ce4b44ddf50b7d2f2b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Apr 2026 09:19:47 +0000 Subject: [PATCH 237/247] chore(deps): bump docker/metadata-action from 5 to 6 Bumps [docker/metadata-action](https://github.com/docker/metadata-action) from 5 to 6. - [Release notes](https://github.com/docker/metadata-action/releases) - [Commits](https://github.com/docker/metadata-action/compare/v5...v6) --- updated-dependencies: - dependency-name: docker/metadata-action dependency-version: '6' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/container.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 2f2b0780c..0615ad6be 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -108,7 +108,7 @@ jobs: steps: - id: meta name: Docker Meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@v6 with: images: | "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" @@ -146,7 +146,7 @@ jobs: steps: - id: meta name: Docker Meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@v6 with: images: | "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" From 2c78850ab49ede86f36f1d98f93f45b890b98895 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Apr 2026 09:19:32 +0000 Subject: [PATCH 238/247] chore(deps): bump docker/setup-buildx-action from 3 to 4 Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 3 to 4. - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/v3...v4) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-version: '4' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/container.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 0615ad6be..f09a94bca 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -26,7 +26,7 @@ jobs: steps: - id: setup name: Setup Toolchain - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v4 - id: build name: Build @@ -124,7 +124,7 @@ jobs: - id: setup name: Setup Toolchain - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v4 - name: Build and push uses: docker/build-push-action@v6 @@ -165,7 +165,7 @@ jobs: - id: setup name: Setup Toolchain - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v4 - name: Build and push uses: docker/build-push-action@v6 From 4f3f1956f4a8d1839f95bd697b851ab04df31fbd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Apr 2026 09:19:27 +0000 Subject: [PATCH 239/247] chore(deps): bump docker/build-push-action from 6 to 7 Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 6 to 7. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v6...v7) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-version: '7' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/container.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index f09a94bca..e0857e936 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -30,7 +30,7 @@ jobs: - id: build name: Build - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: file: ./Containerfile push: false @@ -127,7 +127,7 @@ jobs: uses: docker/setup-buildx-action@v4 - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: file: ./Containerfile push: true @@ -168,7 +168,7 @@ jobs: uses: docker/setup-buildx-action@v4 - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: file: ./Containerfile push: true From f2612dc1fa0aa242503b0354227b644a12fbe47d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 12:34:06 +0100 Subject: [PATCH 240/247] docs(issue-523): add internal linting implementation plan --- docs/issues/523-internal-linting-tool.md | 141 +++++++++++++++++++++++ 1 file changed, 141 insertions(+) create mode 100644 docs/issues/523-internal-linting-tool.md diff --git a/docs/issues/523-internal-linting-tool.md b/docs/issues/523-internal-linting-tool.md new file mode 100644 index 000000000..14593e190 --- /dev/null +++ b/docs/issues/523-internal-linting-tool.md @@ -0,0 +1,141 @@ +# Issue #523 Implementation Plan (Internal Linting Tool) + +## Goal + +Replace the MegaLinter idea with Torrust internal linting tooling and integrate it into CI for this repository. 
+ +## Scope + +- Target issue: https://github.com/torrust/torrust-tracker/issues/523 +- CI workflow to modify: .github/workflows/testing.yaml +- External reference workflow: https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.github/workflows/linting.yml + +## Tasks + +### 0) Create a local branch following GitHub branch naming conventions + +- Approved branch name: `523-internal-linting-tool` +- Commands: + - `git fetch --all --prune` + - `git checkout develop` + - `git pull --ff-only` + - `git checkout -b 523-internal-linting-tool` +- Checkpoint: + - `git branch --show-current` should output `523-internal-linting-tool`. + +### 1) Install and run the linting tool locally; verify it passes in this repo + +- Identify/install internal linting package/tool used by Torrust (likely `torrust-linting` or equivalent wrapper). +- Ensure local runtime dependencies are present (if any). +- Note: linter config files (step 2) must exist in the repo root before a full suite run; it is fine to do a first exploratory run first to discover which linters are active. +- Run the internal linting command against this repository. +- Capture the exact command and output summary for reproducibility. +- Checkpoint: + - Linting command exits with code `0`. + +### 2) Add and adapt linter configuration files + +Some linters require a config file in the repo root. Use the deployer configs as reference and adapt values to this repository. 
+ +| File | Linter | Reference | +| -------------------- | ---------------- | ----------------------------------------------------------------------------------------------------- | +| `.markdownlint.json` | markdownlint | https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.markdownlint.json | +| `.taplo.toml` | taplo (TOML fmt) | https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.taplo.toml | +| `.yamllint-ci.yml` | yamllint | https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.yamllint-ci.yml | + +Key adaptations to make per file: + +- `.markdownlint.json`: review line-length rules and Markdown conventions used in this repo's docs. +- `.taplo.toml`: update `exclude` list to match this repo's generated/runtime folders (e.g. `target/**`, `storage/**`) instead of the deployer-specific ones (`build/**`, `data/**`, `envs/**`). +- `.yamllint-ci.yml`: update `ignore` block to reflect this repo's generated/runtime directories instead of cloud-init and deployer folders. + +Commit message: `ci(lint): add linter config files (.markdownlint.json, .taplo.toml, .yamllint-ci.yml)` + +Checkpoint: + +- Config files are present in the repo root. +- Running each individual linter against the repo with the config produces expected/controlled output. + +### 3) If local linting fails, fix all lint errors; commit fixes independently per linter + +- If the linting suite reports failures: + - Group findings by linter (for example: formatting, clippy, docs, spelling, yaml, etc.). + - Fix only one linter category at a time. + - Create one commit per linter category. +- Commit style proposal: + - `fix(lint/): resolve ` +- Constraints: + - Do not mix workflow/tooling changes with source lint fixes in the same commit. + - Keep each commit minimal and reviewable. +- Checkpoint: + - Re-run linting suite; all checks pass before moving to workflow integration. 
+ +### 4) Review existing workflow example using internal linting + +- Read and analyze: + - https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.github/workflows/linting.yml +- Extract and adapt: + - Trigger strategy. + - Tool setup/install method. + - Cache strategy. + - Invocation command and CI fail behavior. +- Checkpoint: + - Document a short mapping from deployer workflow pattern to this repo’s `testing.yaml` job structure. + +### 5) Modify `.github/workflows/testing.yaml` to use the internal linting tool + +- Update the current `check`/lint-related section to run the internal linting command. +- Replace existing lint/check execution path with the internal linting tool in this migration (no parallel transition mode). +- Ensure matrix/toolchain compatibility is explicit (nightly/stable behavior decided and documented). +- Validate workflow syntax before commit. +- Checkpoint: + - Workflow is valid and executes linting through internal tool. + +### 6) Commit workflow changes + +- Commit only workflow-related changes in a dedicated commit. +- Commit message proposal: + - `ci(lint): switch testing workflow to internal linting tool` +- Checkpoint: + - `git show --name-only --stat HEAD` includes only expected workflow files (and any required supporting CI files if intentionally added). + +### 7) Push to remote `josecelano` and open PR into `develop` + +- Verify remote exists: + - `git remote -v` +- Push branch: + - `git push -u josecelano 523-internal-linting-tool` +- Open PR targeting `torrust/torrust-tracker:develop` with head `josecelano:523-internal-linting-tool`. +- PR content should include: + - Why internal linting over MegaLinter. + - Summary of lint-fix commits by linter. + - Summary of workflow change. + - Evidence (local run + CI status). +- Checkpoint: + - PR is open, linked to issue #523, and ready for review. + +## Execution Notes + +- Keep PR review-friendly by separating commits by concern: + 1. 
Linter config files (step 2) + 2. Per-linter source fixes (step 3, only if needed) + 3. CI workflow migration (step 6) +- Use Conventional Commits for all commits in this implementation. +- If lint checks differ between local and CI, align tool versions and execution flags before merging. +- Avoid broad refactors unrelated to lint failures. + +## Decisions Confirmed + +1. Branch name: `523-internal-linting-tool`. +2. CI strategy: replace existing lint/check path with internal linting. +3. Commit convention: yes, use Conventional Commits. +4. PR target: base `torrust/torrust-tracker:develop`, head `josecelano:523-internal-linting-tool`. + +## Risks and Mitigations + +- Risk: Internal linting wrapper may not be version-pinned and may produce unstable CI behavior. + - Mitigation: Pin tool version in workflow installation step. +- Risk: Internal linting may overlap with existing checks, increasing CI time. + - Mitigation: Remove redundant jobs only after verifying coverage parity. +- Risk: Tool may require secrets or environment assumptions not available in CI. + - Mitigation: Run dry-run in GitHub Actions on branch before requesting review. 
From fa3b491bb70348be86bc51f80431ece411596554 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 16:00:55 +0100 Subject: [PATCH 241/247] ci(lint): add linter config files --- .markdownlint.json | 18 ++++++++++++++++++ .taplo.toml | 31 +++++++++++++++++++++++++++++++ .yamllint-ci.yml | 16 ++++++++++++++++ 3 files changed, 65 insertions(+) create mode 100644 .markdownlint.json create mode 100644 .taplo.toml create mode 100644 .yamllint-ci.yml diff --git a/.markdownlint.json b/.markdownlint.json new file mode 100644 index 000000000..19ec47c2e --- /dev/null +++ b/.markdownlint.json @@ -0,0 +1,18 @@ +{ + "default": true, + "MD013": false, + "MD031": true, + "MD032": true, + "MD040": true, + "MD022": true, + "MD009": true, + "MD007": { + "indent": 2 + }, + "MD026": false, + "MD041": false, + "MD034": false, + "MD024": false, + "MD033": false, + "MD060": false +} diff --git a/.taplo.toml b/.taplo.toml new file mode 100644 index 000000000..d0f755dcd --- /dev/null +++ b/.taplo.toml @@ -0,0 +1,31 @@ +# Taplo configuration file for TOML formatting +# Used by the "Even Better TOML" VS Code extension + +# Exclude generated and runtime folders from linting +exclude = [ + "target/**", + "storage/**", + ".coverage/**", +] + +[formatting] +# Preserve blank lines that exist +allowed_blank_lines = 1 +# Don't reorder keys to maintain structure +reorder_keys = false +# Array formatting +array_trailing_comma = true +array_auto_expand = false +array_auto_collapse = false +# Inline table formatting +inline_table_expand = false +compact_inline_tables = false +compact_arrays = false +# Alignment +align_entries = false +align_comments = true +# Indentation +indent_tables = false +indent_entries = false +# Other +trailing_newline = true diff --git a/.yamllint-ci.yml b/.yamllint-ci.yml new file mode 100644 index 000000000..9380b592a --- /dev/null +++ b/.yamllint-ci.yml @@ -0,0 +1,16 @@ +extends: default + +rules: + line-length: + max: 200 # More reasonable for infrastructure code 
+ comments: + min-spaces-from-content: 1 # Allow single space before comments + document-start: disable # Most project YAML files don't require --- + truthy: + allowed-values: ["true", "false", "yes", "no", "on", "off"] # Allow common GitHub Actions values + +# Ignore generated/runtime directories +ignore: | + target/** + storage/** + .coverage/** From bc1f8cc72c0f8752480321860262a4e04f14305f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 16:09:21 +0100 Subject: [PATCH 242/247] fix(lint/clippy): resolve pedantic duration and style violations --- packages/axum-rest-tracker-api-server/src/server.rs | 4 ++-- .../benches/http_tracker_core_benchmark.rs | 2 +- .../benches/repository_benchmark.rs | 8 ++++---- .../torrent-repository-benchmarking/tests/entry/mod.rs | 5 +++-- .../tests/repository/mod.rs | 5 +++-- packages/tracker-client/src/udp/client.rs | 2 +- .../benches/udp_tracker_core_benchmark.rs | 2 +- packages/udp-tracker-server/src/server/launcher.rs | 2 +- packages/udp-tracker-server/src/statistics/repository.rs | 4 ++-- packages/udp-tracker-server/tests/server/contract.rs | 2 +- 10 files changed, 19 insertions(+), 17 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs index 05adeae8a..9eef6b71a 100644 --- a/packages/axum-rest-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -220,9 +220,9 @@ pub struct Launcher { impl std::fmt::Display for Launcher { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if self.tls.is_some() { - write!(f, "(with socket): {}, using TLS", self.bind_to,) + write!(f, "(with socket): {}, using TLS", self.bind_to) } else { - write!(f, "(with socket): {}, without TLS", self.bind_to,) + write!(f, "(with socket): {}, without TLS", self.bind_to) } } } diff --git a/packages/http-tracker-core/benches/http_tracker_core_benchmark.rs 
b/packages/http-tracker-core/benches/http_tracker_core_benchmark.rs index aa50ceeb9..c193c5124 100644 --- a/packages/http-tracker-core/benches/http_tracker_core_benchmark.rs +++ b/packages/http-tracker-core/benches/http_tracker_core_benchmark.rs @@ -12,7 +12,7 @@ fn announce_once(c: &mut Criterion) { let mut group = c.benchmark_group("http_tracker_handle_announce_once"); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("handle_announce_data", |b| { b.iter(|| sync::return_announce_data_once(100)); diff --git a/packages/torrent-repository-benchmarking/benches/repository_benchmark.rs b/packages/torrent-repository-benchmarking/benches/repository_benchmark.rs index a58207492..f5f8e4b28 100644 --- a/packages/torrent-repository-benchmarking/benches/repository_benchmark.rs +++ b/packages/torrent-repository-benchmarking/benches/repository_benchmark.rs @@ -17,7 +17,7 @@ fn add_one_torrent(c: &mut Criterion) { let mut group = c.benchmark_group("add_one_torrent"); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("RwLockStd", |b| { b.iter_custom(sync::add_one_torrent::); @@ -74,7 +74,7 @@ fn add_multiple_torrents_in_parallel(c: &mut Criterion) { //group.sample_size(10); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("RwLockStd", |b| { b.to_async(&rt) @@ -138,7 +138,7 @@ fn update_one_torrent_in_parallel(c: &mut Criterion) { //group.sample_size(10); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("RwLockStd", |b| { b.to_async(&rt) @@ -202,7 +202,7 @@ fn 
update_multiple_torrents_in_parallel(c: &mut Criterion) { //group.sample_size(10); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("RwLockStd", |b| { b.to_async(&rt) diff --git a/packages/torrent-repository-benchmarking/tests/entry/mod.rs b/packages/torrent-repository-benchmarking/tests/entry/mod.rs index 5cbb3b19c..86ca891d4 100644 --- a/packages/torrent-repository-benchmarking/tests/entry/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/entry/mod.rs @@ -1,5 +1,4 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::ops::Sub; use std::time::Duration; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; @@ -430,7 +429,9 @@ async fn it_should_remove_inactive_peers_beyond_cutoff( let now = clock::Working::now(); clock::Stopped::local_set(&now); - peer.updated = now.sub(EXPIRE); + peer.updated = now + .checked_sub(EXPIRE) + .expect("it_should_remove_inactive_peers_beyond_cutoff: EXPIRE must not exceed now"); torrent.upsert_peer(&peer).await; diff --git a/packages/torrent-repository-benchmarking/tests/repository/mod.rs b/packages/torrent-repository-benchmarking/tests/repository/mod.rs index ec7e68bae..fb0b8fcff 100644 --- a/packages/torrent-repository-benchmarking/tests/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/repository/mod.rs @@ -526,7 +526,6 @@ async fn it_should_remove_inactive_peers( repo: Repo, #[case] entries: Entries, ) { - use std::ops::Sub as _; use std::time::Duration; use torrust_tracker_clock::clock::stopped::Stopped as _; @@ -556,7 +555,9 @@ async fn it_should_remove_inactive_peers( let now = clock::Working::now(); clock::Stopped::local_set(&now); - peer.updated = now.sub(EXPIRE); + peer.updated = now + .checked_sub(EXPIRE) + .expect("it_should_remove_inactive_peers_beyond_cutoff: EXPIRE must not exceed now"); } // Insert the infohash and peer into the repository diff --git 
a/packages/tracker-client/src/udp/client.rs b/packages/tracker-client/src/udp/client.rs index 1c5ffd901..94c882d29 100644 --- a/packages/tracker-client/src/udp/client.rs +++ b/packages/tracker-client/src/udp/client.rs @@ -256,7 +256,7 @@ pub async fn check(service_binding: &ServiceBinding) -> Result { } }; - let sleep = time::sleep(Duration::from_millis(2000)); + let sleep = time::sleep(Duration::from_secs(2)); tokio::pin!(sleep); tokio::select! { diff --git a/packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs b/packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs index 5bd0e27c8..90fc721d0 100644 --- a/packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs +++ b/packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs @@ -9,7 +9,7 @@ use crate::helpers::sync; fn bench_connect_once(c: &mut Criterion) { let mut group = c.benchmark_group("udp_tracker/connect_once"); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("connect_once", |b| { b.iter(|| sync::connect_once(100)); diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index a514921cc..4fd3a95d9 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -54,7 +54,7 @@ impl Launcher { panic!("it should not use udp if using authentication"); } - let socket = tokio::time::timeout(Duration::from_millis(5000), BoundSocket::new(bind_to)) + let socket = tokio::time::timeout(Duration::from_secs(5), BoundSocket::new(bind_to)) .await .expect("it should bind to the socket within five seconds"); diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 94a86e3ab..c4c995b8a 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ 
b/packages/udp-tracker-server/src/statistics/repository.rs @@ -330,7 +330,7 @@ mod tests { // Calculate new average with processing time of 2000ns // This will increment the processed requests counter from 0 to 1 - let processing_time = Duration::from_nanos(2000); + let processing_time = Duration::from_micros(2); let new_avg = repo .recalculate_udp_avg_processing_time_ns(processing_time, &connect_labels, now) .await; @@ -417,7 +417,7 @@ mod tests { let now = CurrentClock::now(); // Test with zero connections (should not panic, should handle division by zero) - let processing_time = Duration::from_nanos(1000); + let processing_time = Duration::from_micros(1); let connect_labels = LabelSet::from([("request_kind", "connect")]); let connect_avg = repo diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index e9691c879..350f3b8eb 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -32,7 +32,7 @@ async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrac match response { Response::Connect(connect_response) => connect_response.connection_id, - _ => panic!("error connecting to udp server {:?}", response), + _ => panic!("error connecting to udp server {response:?}"), } } From f9b59f0c8e3dfbc0d79c0b43efbb10b95a157a6d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 16:29:05 +0100 Subject: [PATCH 243/247] fix(lint/cspell): configure ignores and dictionary for repo --- .github/workflows/upload_coverage_pr.yaml | 2 +- cspell.json | 27 ++++++++++++ project-words.txt | 53 +++++++++++++++++++++++ 3 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 cspell.json diff --git a/.github/workflows/upload_coverage_pr.yaml b/.github/workflows/upload_coverage_pr.yaml index 8b0006a6d..55de02c62 100644 --- a/.github/workflows/upload_coverage_pr.yaml +++ b/.github/workflows/upload_coverage_pr.yaml 
@@ -1,7 +1,7 @@ name: Upload Coverage Report (PR) on: - # This workflow is triggered after every successfull execution + # This workflow is triggered after every successful execution # of `Generate Coverage Report` workflow. workflow_run: workflows: ["Generate Coverage Report (PR)"] diff --git a/cspell.json b/cspell.json new file mode 100644 index 000000000..02f29f7f9 --- /dev/null +++ b/cspell.json @@ -0,0 +1,27 @@ +{ + "$schema": "https://raw.githubusercontent.com/streetsidesoftware/cspell/main/cspell.schema.json", + "version": "0.2", + "dictionaryDefinitions": [ + { + "name": "project-words", + "path": "./project-words.txt", + "addWords": true + } + ], + "dictionaries": [ + "project-words" + ], + "enableFiletypes": [ + "dockerfile", + "shellscript", + "toml" + ], + "ignorePaths": [ + "target", + "docs/media/*.svg", + "contrib/bencode/benches/*.bencode", + "contrib/dev-tools/su-exec/**", + ".github/labels.json", + "/project-words.txt" + ] +} \ No newline at end of file diff --git a/project-words.txt b/project-words.txt index c698eea9c..48c9565cc 100644 --- a/project-words.txt +++ b/project-words.txt @@ -197,3 +197,56 @@ Xunlei xxxxxxxxxxxxxxxxxxxxd yyyyyyyyyyyyyyyyyyyyd zerocopy +Aideq +autoremove +CALLSITE +Dihc +Dmqcd +QJSF +Glrg +Irwe +Uninit +Unparker +eventfd +fastrand +fdbased +fdget +fput +iiiiiiiiiiiiiiiippe +iiiiiiiiiiiiiiiipp +iiiiiiiiiiiiiiip +iipp +iiiipp +jdbe +ksys +llist +mmap +mprotect +nonblocking +peersld +pkey +porti +prealloc +println +shellcheck +sockfd +subkey +sysmalloc +sysret +timespec +toki +torru +ttwu +uninit +unparked +unsync +vtable +wakelist +wakeup +actix +iterationsadd +josecelano +mysqladmin +setgroups +taplo +trixie From b654fa5fb18ffa94d167aef3e21becfdac7fbda7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 16:40:03 +0100 Subject: [PATCH 244/247] fix(lint/markdown): resolve markdownlint violations --- README.md | 18 ++++-------------- contrib/bencode/README.md | 3 ++- contrib/dev-tools/su-exec/README.md | 4 
++-- 3 files changed, 8 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index bb102355b..2fe28db08 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,7 @@ Others: ## Implemented BitTorrent Enhancement Proposals (BEPs) -> + > _[Learn more about BitTorrent Enhancement Proposals][BEP 00]_ - [BEP 03]: The BitTorrent Protocol. @@ -113,8 +113,8 @@ podman run -it docker.io/torrust/tracker:develop ### Development Version -- Please ensure you have the _**[latest stable (or nightly) version of rust][rust]___. -- Please ensure that your computer has enough RAM. _**Recommended 16GB.___ +- Please ensure you have the \_\*\*[latest stable (or nightly) version of rust][rust]\_\_\_. +- Please ensure that your computer has enough RAM. \_\*\*Recommended 16GB.\_\_\_ #### Checkout, Test and Run @@ -217,7 +217,7 @@ This program is free software: you can redistribute it and/or modify it under th This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the [GNU Affero General Public License][AGPL_3_0] for more details. -You should have received a copy of the *GNU Affero General Public License* along with this program. If not, see . +You should have received a copy of the _GNU Affero General Public License_ along with this program. If not, see . Some files include explicit copyright notices and/or license notices. 
@@ -250,18 +250,14 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [deployment_wf_b]: ../../actions/workflows/deployment.yaml/badge.svg [testing_wf]: ../../actions/workflows/testing.yaml [testing_wf_b]: ../../actions/workflows/testing.yaml/badge.svg - [bittorrent]: http://bittorrent.org/ [rust]: https://www.rust-lang.org/ [axum]: https://github.com/tokio-rs/axum [newtrackon]: https://newtrackon.com/ [coverage]: https://app.codecov.io/gh/torrust/torrust-tracker [torrust]: https://torrust.com/ - [dockerhub]: https://hub.docker.com/r/torrust/tracker/tags - [torrent_source_felid]: https://github.com/qbittorrent/qBittorrent/discussions/19406 - [BEP 00]: https://www.bittorrent.org/beps/bep_0000.html [BEP 03]: https://www.bittorrent.org/beps/bep_0003.html [BEP 07]: https://www.bittorrent.org/beps/bep_0007.html @@ -269,24 +265,18 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [BEP 23]: https://www.bittorrent.org/beps/bep_0023.html [BEP 27]: https://www.bittorrent.org/beps/bep_0027.html [BEP 48]: https://www.bittorrent.org/beps/bep_0048.html - [containers.md]: ./docs/containers.md - [docs]: https://docs.rs/torrust-tracker/latest/ [api]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/apis/v1 [http]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/http [udp]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/udp - [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [discussions]: https://github.com/torrust/torrust-tracker/discussions - [guide.md]: https://github.com/torrust/.github/blob/main/info/contributing.md [agreement.md]: https://github.com/torrust/.github/blob/main/info/licensing/contributor_agreement_v01.md - [AGPL_3_0]: ./docs/licenses/LICENSE-AGPL_3_0 [MIT_0]: ./docs/licenses/LICENSE-MIT_0 [FSF]: https://www.fsf.org/ - [nautilus]: https://github.com/orgs/Nautilus-Cyberneering/ 
[Dutch Bits]: https://dutchbits.nl [Naim A.]: https://github.com/naim94a/udpt diff --git a/contrib/bencode/README.md b/contrib/bencode/README.md index 7a203082b..81c09f691 100644 --- a/contrib/bencode/README.md +++ b/contrib/bencode/README.md @@ -1,4 +1,5 @@ # Bencode + This library allows for the creation and parsing of bencode encodings. -Bencode is the binary encoding used throughout bittorrent technologies from metainfo files to DHT messages. Bencode types include integers, byte arrays, lists, and dictionaries, of which the last two can hold any bencode type (they could be recursively constructed). \ No newline at end of file +Bencode is the binary encoding used throughout bittorrent technologies from metainfo files to DHT messages. Bencode types include integers, byte arrays, lists, and dictionaries, of which the last two can hold any bencode type (they could be recursively constructed). diff --git a/contrib/dev-tools/su-exec/README.md b/contrib/dev-tools/su-exec/README.md index 2b0517377..1dd4108ac 100644 --- a/contrib/dev-tools/su-exec/README.md +++ b/contrib/dev-tools/su-exec/README.md @@ -1,4 +1,5 @@ # su-exec + switch user and group id, setgroups and exec ## Purpose @@ -21,7 +22,7 @@ name separated with colon (e.g. `nobody:ftp`). Numeric uid/gid values can be used instead of names. Example: ```shell -$ su-exec apache:1000 /usr/sbin/httpd -f /opt/www/httpd.conf +su-exec apache:1000 /usr/sbin/httpd -f /opt/www/httpd.conf ``` ## TTY & parent/child handling @@ -43,4 +44,3 @@ PID USER TIME COMMAND This does more or less exactly the same thing as [gosu](https://github.com/tianon/gosu) but it is only 10kb instead of 1.8MB. 
- From 0e174af960b966e3a6a8d1b69fd306a4fbc07b83 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 16:53:14 +0100 Subject: [PATCH 245/247] fix(lint/yaml): resolve workflow yamllint issues --- .github/workflows/container.yaml | 10 ++++++++-- .github/workflows/coverage.yaml | 4 ++-- .github/workflows/generate_coverage_pr.yaml | 2 +- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index e0857e936..7e8ffa442 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -80,9 +80,15 @@ jobs: echo "continue=true" >> $GITHUB_OUTPUT echo "On \`develop\` Branch, Type: \`development\`" - elif [[ $(echo "${{ github.ref }}" | grep -P '^(refs\/heads\/releases\/)(v)(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$') ]]; then + elif [[ "${{ github.ref }}" =~ ^refs/heads/releases/ ]]; then + semver_regex='^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-((0|[1-9][0-9]*|[0-9]*[A-Za-z-][0-9A-Za-z-]*)(\.(0|[1-9][0-9]*|[0-9]*[A-Za-z-][0-9A-Za-z-]*))*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$' + version=$(echo "${{ github.ref }}" | sed -n -E 's#^refs/heads/releases/##p') + + if [[ ! "$version" =~ $semver_regex ]]; then + echo "Not a valid release branch semver. 
Will Not Continue" + exit 0 + fi - version=$(echo "${{ github.ref }}" | sed -n -E 's/^(refs\/heads\/releases\/)//p') echo "version=$version" >> $GITHUB_OUTPUT echo "type=release" >> $GITHUB_OUTPUT echo "continue=true" >> $GITHUB_OUTPUT diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 2c8d63d6c..4c49217c2 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -44,7 +44,7 @@ jobs: - id: coverage name: Generate Coverage Report run: | - cargo clean + cargo clean cargo llvm-cov --all-features --workspace --codecov --output-path ./codecov.json - id: upload @@ -54,4 +54,4 @@ jobs: verbose: true token: ${{ secrets.CODECOV_TOKEN }} files: ${{ github.workspace }}/codecov.json - fail_ci_if_error: true \ No newline at end of file + fail_ci_if_error: true diff --git a/.github/workflows/generate_coverage_pr.yaml b/.github/workflows/generate_coverage_pr.yaml index a3f97dbf2..e07a5a755 100644 --- a/.github/workflows/generate_coverage_pr.yaml +++ b/.github/workflows/generate_coverage_pr.yaml @@ -44,7 +44,7 @@ jobs: - id: coverage name: Generate Coverage Report run: | - cargo clean + cargo clean cargo llvm-cov --all-features --workspace --codecov --output-path ./codecov.json - name: Store PR number and commit SHA From 7085250ee5033b6ed62dfdf92e4c2c57256dbb85 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 16:55:12 +0100 Subject: [PATCH 246/247] fix(lint/toml): normalize taplo formatting across workspace --- .cargo/config.toml | 32 +++++++++---------- .taplo.toml | 18 ++++------- Cargo.toml | 24 +++++++------- console/tracker-client/Cargo.toml | 18 +++++------ contrib/bencode/Cargo.toml | 4 +-- .../axum-health-check-api-server/Cargo.toml | 18 +++++------ packages/axum-http-tracker-server/Cargo.toml | 20 ++++++------ .../axum-rest-tracker-api-server/Cargo.toml | 28 ++++++++-------- packages/axum-server/Cargo.toml | 12 +++---- packages/clock/Cargo.toml | 4 +-- packages/configuration/Cargo.toml | 16 
+++++----- packages/events/Cargo.toml | 4 +-- packages/http-protocol/Cargo.toml | 6 ++-- packages/http-tracker-core/Cargo.toml | 6 ++-- packages/located-error/Cargo.toml | 2 +- packages/metrics/Cargo.toml | 8 ++--- packages/primitives/Cargo.toml | 6 ++-- packages/rest-tracker-api-client/Cargo.toml | 10 +++--- packages/rest-tracker-api-core/Cargo.toml | 4 +-- packages/server-lib/Cargo.toml | 8 ++--- .../swarm-coordination-registry/Cargo.toml | 12 +++---- packages/test-helpers/Cargo.toml | 4 +-- .../Cargo.toml | 8 ++--- packages/tracker-client/Cargo.toml | 12 +++---- packages/tracker-core/Cargo.toml | 14 ++++---- packages/udp-protocol/Cargo.toml | 2 +- packages/udp-tracker-core/Cargo.toml | 6 ++-- packages/udp-tracker-server/Cargo.toml | 10 +++--- 28 files changed, 156 insertions(+), 160 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 28cde74ec..36a0b3d8c 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -7,20 +7,20 @@ time = "build --timings --all-targets" [build] rustflags = [ - "-D", - "warnings", - "-D", - "future-incompatible", - "-D", - "let-underscore", - "-D", - "nonstandard-style", - "-D", - "rust-2018-compatibility", - "-D", - "rust-2018-idioms", - "-D", - "rust-2021-compatibility", - "-D", - "unused", + "-D", + "warnings", + "-D", + "future-incompatible", + "-D", + "let-underscore", + "-D", + "nonstandard-style", + "-D", + "rust-2018-compatibility", + "-D", + "rust-2018-idioms", + "-D", + "rust-2021-compatibility", + "-D", + "unused", ] diff --git a/.taplo.toml b/.taplo.toml index d0f755dcd..0168711e8 100644 --- a/.taplo.toml +++ b/.taplo.toml @@ -2,11 +2,7 @@ # Used by the "Even Better TOML" VS Code extension # Exclude generated and runtime folders from linting -exclude = [ - "target/**", - "storage/**", - ".coverage/**", -] +exclude = [ ".coverage/**", "storage/**", "target/**" ] [formatting] # Preserve blank lines that exist @@ -14,18 +10,18 @@ allowed_blank_lines = 1 # Don't reorder keys to maintain structure 
reorder_keys = false # Array formatting -array_trailing_comma = true -array_auto_expand = false array_auto_collapse = false +array_auto_expand = false +array_trailing_comma = true # Inline table formatting -inline_table_expand = false -compact_inline_tables = false compact_arrays = false +compact_inline_tables = false +inline_table_expand = false # Alignment -align_entries = false align_comments = true +align_entries = false # Indentation -indent_tables = false indent_entries = false +indent_tables = false # Other trailing_newline = true diff --git a/Cargo.toml b/Cargo.toml index dbc39bdf8..1eb5f0d35 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,13 +19,13 @@ version.workspace = true name = "torrust_tracker_lib" [workspace.package] -authors = ["Nautilus Cyberneering , Mick van Dijke "] -categories = ["network-programming", "web-programming"] +authors = [ "Nautilus Cyberneering , Mick van Dijke " ] +categories = [ "network-programming", "web-programming" ] description = "A feature rich BitTorrent tracker." 
documentation = "https://docs.rs/crate/torrust-tracker/" edition = "2021" homepage = "https://torrust.com/" -keywords = ["bittorrent", "file-sharing", "peer-to-peer", "torrent", "tracker"] +keywords = [ "bittorrent", "file-sharing", "peer-to-peer", "torrent", "tracker" ] license = "AGPL-3.0-only" publish = true repository = "https://github.com/torrust/torrust-tracker" @@ -34,19 +34,19 @@ version = "3.0.0-develop" [dependencies] anyhow = "1" -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "packages/http-tracker-core" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "packages/tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "packages/udp-tracker-core" } -chrono = { version = "0", default-features = false, features = ["clock"] } -clap = { version = "4", features = ["derive", "env"] } +chrono = { version = "0", default-features = false, features = [ "clock" ] } +clap = { version = "4", features = [ "derive", "env" ] } rand = "0" regex = "1" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", features = [ "preserve_order" ] } thiserror = "2.0.12" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } tokio-util = "0.7.15" torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "packages/axum-health-check-api-server" } torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = "packages/axum-http-tracker-server" } @@ -59,7 +59,7 @@ 
torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/co torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "packages/swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "packages/udp-tracker-server" } tracing = "0" -tracing-subscriber = { version = "0", features = ["json"] } +tracing-subscriber = { version = "0", features = [ "json" ] } [dev-dependencies] bittorrent-primitives = "0.1.0" @@ -70,7 +70,7 @@ torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "packages/ torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "packages/test-helpers" } [workspace] -members = ["console/tracker-client", "packages/torrent-repository-benchmarking"] +members = [ "console/tracker-client", "packages/torrent-repository-benchmarking" ] [profile.dev] debug = 1 diff --git a/console/tracker-client/Cargo.toml b/console/tracker-client/Cargo.toml index d4ab7c9e3..8c12227e9 100644 --- a/console/tracker-client/Cargo.toml +++ b/console/tracker-client/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A collection of console clients to make requests to BitTorrent trackers." 
-keywords = ["bittorrent", "client", "tracker"] +keywords = [ "bittorrent", "client", "tracker" ] license = "LGPL-3.0" name = "torrust-tracker-client" readme = "README.md" @@ -19,21 +19,21 @@ anyhow = "1" aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "../../packages/tracker-client" } -clap = { version = "4", features = ["derive", "env"] } +clap = { version = "4", features = [ "derive", "env" ] } futures = "0" hex-literal = "1" hyper = "1" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } serde_bencode = "0" serde_bytes = "0" -serde_json = { version = "1", features = ["preserve_order"] } +serde_json = { version = "1", features = [ "preserve_order" ] } thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../../packages/configuration" } tracing = "0" -tracing-subscriber = { version = "0", features = ["json"] } -url = { version = "2", features = ["serde"] } +tracing-subscriber = { version = "0", features = [ "json" ] } +url = { version = "2", features = [ "serde" ] } [package.metadata.cargo-machete] -ignored = ["serde_bytes"] +ignored = [ "serde_bytes" ] diff --git a/contrib/bencode/Cargo.toml b/contrib/bencode/Cargo.toml index f6355b6fc..5fab1792d 100644 --- a/contrib/bencode/Cargo.toml +++ b/contrib/bencode/Cargo.toml @@ -1,10 +1,10 @@ [package] description = "(contrib) Efficient decoding and encoding for bencode." 
-keywords = ["bencode", "contrib", "library"] +keywords = [ "bencode", "contrib", "library" ] name = "torrust-tracker-contrib-bencode" readme = "README.md" -authors = ["Nautilus Cyberneering , Andrew "] +authors = [ "Nautilus Cyberneering , Andrew " ] license = "Apache-2.0" repository = "https://github.com/torrust/bittorrent-infrastructure-project" diff --git a/packages/axum-health-check-api-server/Cargo.toml b/packages/axum-health-check-api-server/Cargo.toml index e0504f7df..cf9d8d9a3 100644 --- a/packages/axum-health-check-api-server/Cargo.toml +++ b/packages/axum-health-check-api-server/Cargo.toml @@ -4,7 +4,7 @@ description = "The Torrust Bittorrent HTTP tracker." documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "bittorrent", "healthcheck", "http", "server", "torrust", "tracker"] +keywords = [ "axum", "bittorrent", "healthcheck", "http", "server", "torrust", "tracker" ] license.workspace = true name = "torrust-axum-health-check-api-server" publish.workspace = true @@ -14,27 +14,27 @@ rust-version.workspace = true version.workspace = true [dependencies] -axum = { version = "0", features = ["macros"] } -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +axum = { version = "0", features = [ "macros" ] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } futures = "0" hyper = "1" -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", features = [ "preserve_order" ] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } 
torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +tower-http = { version = "0", features = [ "compression-full", "cors", "propagate-header", "request-id", "trace" ] } tracing = "0" url = "2.5.4" [dev-dependencies] -reqwest = { version = "0", features = ["json"] } +reqwest = { version = "0", features = [ "json" ] } torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "../axum-health-check-api-server" } torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = "../axum-http-tracker-server" } torrust-axum-rest-tracker-api-server = { version = "3.0.0-develop", path = "../axum-rest-tracker-api-server" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } -tracing-subscriber = { version = "0", features = ["json"] } +tracing-subscriber = { version = "0", features = [ "json" ] } diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index eb2c2cad3..88d073527 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -4,7 +4,7 @@ description = "The Torrust Bittorrent HTTP tracker." 
documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "bittorrent", "http", "server", "torrust", "tracker"] +keywords = [ "axum", "bittorrent", "http", "server", "torrust", "tracker" ] license.workspace = true name = "torrust-axum-http-tracker-server" publish.workspace = true @@ -15,19 +15,19 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" -axum = { version = "0", features = ["macros"] } +axum = { version = "0", features = [ "macros" ] } axum-client-ip = "0" -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-tracker-core" } bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "../http-protocol" } bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } futures = "0" hyper = "1" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } tokio-util = "0.7.15" torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } @@ -35,8 +35,8 @@ torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } 
torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } -tower = { version = "0", features = ["timeout"] } -tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +tower = { version = "0", features = [ "timeout" ] } +tower-http = { version = "0", features = [ "compression-full", "cors", "propagate-header", "request-id", "trace" ] } tracing = "0" [dev-dependencies] @@ -49,5 +49,5 @@ serde_repr = "0" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } -uuid = { version = "1", features = ["v4"] } +uuid = { version = "1", features = [ "v4" ] } zerocopy = "0.7" diff --git a/packages/axum-rest-tracker-api-server/Cargo.toml b/packages/axum-rest-tracker-api-server/Cargo.toml index 9493b8693..7353e66e8 100644 --- a/packages/axum-rest-tracker-api-server/Cargo.toml +++ b/packages/axum-rest-tracker-api-server/Cargo.toml @@ -4,7 +4,7 @@ description = "The Torrust Tracker API." 
documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "bittorrent", "http", "server", "torrust", "tracker"] +keywords = [ "axum", "bittorrent", "http", "server", "torrust", "tracker" ] license.workspace = true name = "torrust-axum-rest-tracker-api-server" publish.workspace = true @@ -15,22 +15,22 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" -axum = { version = "0", features = ["macros"] } -axum-extra = { version = "0", features = ["query"] } -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +axum = { version = "0", features = [ "macros" ] } +axum-extra = { version = "0", features = [ "query" ] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-tracker-core" } bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } futures = "0" hyper = "1" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } -serde_with = { version = "3", features = ["json"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", features = [ "preserve_order" ] } +serde_with = { version = "3", features = [ "json" ] } thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } 
torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "../rest-tracker-api-client" } torrust-rest-tracker-api-core = { version = "3.0.0-develop", path = "../rest-tracker-api-core" } @@ -41,8 +41,8 @@ torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } -tower = { version = "0", features = ["timeout"] } -tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +tower = { version = "0", features = [ "timeout" ] } +tower-http = { version = "0", features = [ "compression-full", "cors", "propagate-header", "request-id", "trace" ] } tracing = "0" url = "2" @@ -51,5 +51,5 @@ local-ip-address = "0" mockall = "0" torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "../rest-tracker-api-client" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } -url = { version = "2", features = ["serde"] } -uuid = { version = "1", features = ["v4"] } +url = { version = "2", features = [ "serde" ] } +uuid = { version = "1", features = [ "v4" ] } diff --git a/packages/axum-server/Cargo.toml b/packages/axum-server/Cargo.toml index a60bab885..45eddd3b0 100644 --- a/packages/axum-server/Cargo.toml +++ b/packages/axum-server/Cargo.toml @@ -4,7 +4,7 @@ description = "A wrapper for the Axum server for Torrust HTTP servers to add tim documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "server", "torrust", "wrapper"] +keywords = [ "axum", "server", "torrust", "wrapper" ] license.workspace = true name = "torrust-axum-server" publish.workspace = true @@ -14,19 +14,19 @@ rust-version.workspace = true version.workspace 
= true [dependencies] -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } -camino = { version = "1", features = ["serde", "serde1"] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } +camino = { version = "1", features = [ "serde", "serde1" ] } futures-util = "0" http-body = "1" hyper = "1" -hyper-util = { version = "0", features = ["http1", "http2", "tokio"] } +hyper-util = { version = "0", features = [ "http1", "http2", "tokio" ] } pin-project-lite = "0" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } -tower = { version = "0", features = ["timeout"] } +tower = { version = "0", features = [ "timeout" ] } tracing = "0" [dev-dependencies] diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml index 3bd00d2b0..c0cafff0a 100644 --- a/packages/clock/Cargo.toml +++ b/packages/clock/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to a clock for the torrust tracker." 
-keywords = ["clock", "library", "torrents"] +keywords = [ "clock", "library", "torrents" ] name = "torrust-tracker-clock" readme = "README.md" @@ -16,7 +16,7 @@ rust-version.workspace = true version.workspace = true [dependencies] -chrono = { version = "0", default-features = false, features = ["clock"] } +chrono = { version = "0", default-features = false, features = [ "clock" ] } lazy_static = "1" tracing = "0" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index e213f7c0c..1155ba417 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to provide configuration to the Torrust Tracker." -keywords = ["config", "library", "settings"] +keywords = [ "config", "library", "settings" ] name = "torrust-tracker-configuration" readme = "README.md" @@ -15,18 +15,18 @@ rust-version.workspace = true version.workspace = true [dependencies] -camino = { version = "1", features = ["serde", "serde1"] } -derive_more = { version = "2", features = ["constructor", "display"] } -figment = { version = "0", features = ["env", "test", "toml"] } -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } +camino = { version = "1", features = [ "serde", "serde1" ] } +derive_more = { version = "2", features = [ "constructor", "display" ] } +figment = { version = "0", features = [ "env", "test", "toml" ] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", features = [ "preserve_order" ] } serde_with = "3" thiserror = "2" toml = "0" torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } tracing = "0" -tracing-subscriber = { version = "0", features = ["json"] } +tracing-subscriber = { version = "0", features = [ "json" ] } url = "2" [dev-dependencies] -uuid = { version = "1", features = ["v4"] } +uuid = { version = "1", features = [ "v4" ] } diff --git 
a/packages/events/Cargo.toml b/packages/events/Cargo.toml index 1d183cddb..165ecca68 100644 --- a/packages/events/Cargo.toml +++ b/packages/events/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with functionality to handle events in Torrust tracker packages." -keywords = ["events", "library", "rust", "torrust", "tracker"] +keywords = [ "events", "library", "rust", "torrust", "tracker" ] name = "torrust-tracker-events" readme = "README.md" @@ -16,7 +16,7 @@ version.workspace = true [dependencies] futures = "0" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync", "time"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync", "time" ] } [dev-dependencies] mockall = "0" diff --git a/packages/http-protocol/Cargo.toml b/packages/http-protocol/Cargo.toml index 7803fe78e..78a037b18 100644 --- a/packages/http-protocol/Cargo.toml +++ b/packages/http-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with the primitive types and functions for the BitTorrent HTTP tracker protocol." 
-keywords = ["api", "library", "primitives"] +keywords = [ "api", "library", "primitives" ] name = "bittorrent-http-tracker-protocol" readme = "README.md" @@ -18,10 +18,10 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } multimap = "0" percent-encoding = "2" -serde = { version = "1", features = ["derive"] } +serde = { version = "1", features = [ "derive" ] } serde_bencode = "0" thiserror = "2" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index 04a6c96b6..c419052f9 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -4,7 +4,7 @@ description = "A library with the core functionality needed to implement a BitTo documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["api", "bittorrent", "core", "library", "tracker"] +keywords = [ "api", "bittorrent", "core", "library", "tracker" ] license.workspace = true name = "bittorrent-http-tracker-core" publish.workspace = true @@ -18,11 +18,11 @@ aquatic_udp_protocol = "0" bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "../http-protocol" } bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } -criterion = { version = "0.5.1", features = ["async_tokio"] } +criterion = { version = "0.5.1", features = [ "async_tokio" ] } futures = "0" serde = "1.0.219" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } tokio-util = 
"0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index 29b0dfb2c..232a6113f 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to provide error decorator with the location and the source of the original error." -keywords = ["errors", "helper", "library"] +keywords = [ "errors", "helper", "library" ] name = "torrust-tracker-located-error" readme = "README.md" diff --git a/packages/metrics/Cargo.toml b/packages/metrics/Cargo.toml index 0597785f4..b6d327d70 100644 --- a/packages/metrics/Cargo.toml +++ b/packages/metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with the primitive types shared by the Torrust tracker packages." -keywords = ["api", "library", "metrics"] +keywords = [ "api", "library", "metrics" ] name = "torrust-tracker-metrics" readme = "README.md" @@ -15,9 +15,9 @@ rust-version.workspace = true version.workspace = true [dependencies] -chrono = { version = "0", default-features = false, features = ["clock"] } -derive_more = { version = "2", features = ["constructor"] } -serde = { version = "1", features = ["derive"] } +chrono = { version = "0", default-features = false, features = [ "clock" ] } +derive_more = { version = "2", features = [ "constructor" ] } +serde = { version = "1", features = [ "derive" ] } serde_json = "1.0.140" thiserror = "2" torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 21fab09bf..c9ce64177 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with the primitive types shared by the Torrust tracker packages." 
-keywords = ["api", "library", "primitives"] +keywords = [ "api", "library", "primitives" ] name = "torrust-tracker-primitives" readme = "README.md" @@ -18,8 +18,8 @@ version.workspace = true aquatic_udp_protocol = "0" binascii = "0" bittorrent-primitives = "0.1.0" -derive_more = { version = "2", features = ["constructor"] } -serde = { version = "1", features = ["derive"] } +derive_more = { version = "2", features = [ "constructor" ] } +serde = { version = "1", features = [ "derive" ] } tdyne-peer-id = "1" tdyne-peer-id-registry = "0" thiserror = "2" diff --git a/packages/rest-tracker-api-client/Cargo.toml b/packages/rest-tracker-api-client/Cargo.toml index c01b9c05a..47307df9a 100644 --- a/packages/rest-tracker-api-client/Cargo.toml +++ b/packages/rest-tracker-api-client/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to interact with the Torrust Tracker REST API." -keywords = ["bittorrent", "client", "tracker"] +keywords = [ "bittorrent", "client", "tracker" ] license = "LGPL-3.0" name = "torrust-rest-tracker-api-client" readme = "README.md" @@ -16,8 +16,8 @@ version.workspace = true [dependencies] hyper = "1" -reqwest = { version = "0", features = ["json", "query"] } -serde = { version = "1", features = ["derive"] } +reqwest = { version = "0", features = [ "json", "query" ] } +serde = { version = "1", features = [ "derive" ] } thiserror = "2" -url = { version = "2", features = ["serde"] } -uuid = { version = "1", features = ["v4"] } +url = { version = "2", features = [ "serde" ] } +uuid = { version = "1", features = [ "v4" ] } diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index be6d493d7..0808c2dd6 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -4,7 +4,7 @@ description = "A library with the core functionality needed to implement a BitTo documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["api", 
"bittorrent", "core", "library", "tracker"] +keywords = [ "api", "bittorrent", "core", "library", "tracker" ] license.workspace = true name = "torrust-rest-tracker-api-core" publish.workspace = true @@ -17,7 +17,7 @@ version.workspace = true bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-tracker-core" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } tokio-util = "0.7.15" torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } diff --git a/packages/server-lib/Cargo.toml b/packages/server-lib/Cargo.toml index 1d30e7fb5..fbd7a7a7f 100644 --- a/packages/server-lib/Cargo.toml +++ b/packages/server-lib/Cargo.toml @@ -4,7 +4,7 @@ description = "Common functionality used in all Torrust HTTP servers." 
documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["lib", "server", "torrust"] +keywords = [ "lib", "server", "torrust" ] license.workspace = true name = "torrust-server-lib" publish.workspace = true @@ -14,10 +14,10 @@ rust-version.workspace = true version.workspace = true [dependencies] -derive_more = { version = "2", features = ["as_ref", "constructor", "display", "from"] } -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "display", "from" ] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +tower-http = { version = "0", features = [ "compression-full", "cors", "propagate-header", "request-id", "trace" ] } tracing = "0" [dev-dependencies] diff --git a/packages/swarm-coordination-registry/Cargo.toml b/packages/swarm-coordination-registry/Cargo.toml index 45359ad81..f9513d3c4 100644 --- a/packages/swarm-coordination-registry/Cargo.toml +++ b/packages/swarm-coordination-registry/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library that provides a repository of torrents files and their peers." 
-keywords = ["library", "repository", "torrents"] +keywords = [ "library", "repository", "torrents" ] name = "torrust-tracker-swarm-coordination-registry" readme = "README.md" @@ -18,12 +18,12 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" -chrono = { version = "0", default-features = false, features = ["clock"] } +chrono = { version = "0", default-features = false, features = [ "clock" ] } crossbeam-skiplist = "0" futures = "0" -serde = { version = "1.0.219", features = ["derive"] } +serde = { version = "1.0.219", features = [ "derive" ] } thiserror = "2.0.12" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } @@ -33,8 +33,8 @@ torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" tracing = "0" [dev-dependencies] -async-std = { version = "1", features = ["attributes", "tokio1"] } -criterion = { version = "0", features = ["async_tokio"] } +async-std = { version = "1", features = [ "attributes", "tokio1" ] } +criterion = { version = "0", features = [ "async_tokio" ] } mockall = "0" rand = "0" rstest = "0" diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 3495c314a..fb240730d 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library providing helpers for testing the Torrust tracker." 
-keywords = ["helper", "library", "testing"] +keywords = [ "helper", "library", "testing" ] name = "torrust-tracker-test-helpers" readme = "README.md" @@ -18,4 +18,4 @@ version.workspace = true rand = "0" torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } tracing = "0" -tracing-subscriber = { version = "0", features = ["json"] } +tracing-subscriber = { version = "0", features = [ "json" ] } diff --git a/packages/torrent-repository-benchmarking/Cargo.toml b/packages/torrent-repository-benchmarking/Cargo.toml index 1a93c513c..653ad8102 100644 --- a/packages/torrent-repository-benchmarking/Cargo.toml +++ b/packages/torrent-repository-benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to runt benchmarking for different implementations of a repository of torrents files and their peers." -keywords = ["library", "repository", "torrents"] +keywords = [ "library", "repository", "torrents" ] name = "torrust-tracker-torrent-repository-benchmarking" readme = "README.md" @@ -22,15 +22,15 @@ crossbeam-skiplist = "0" dashmap = "6" futures = "0" parking_lot = "0" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } zerocopy = "0.7" [dev-dependencies] -async-std = { version = "1", features = ["attributes", "tokio1"] } -criterion = { version = "0", features = ["async_tokio"] } +async-std = { version = "1", features = [ "attributes", "tokio1" ] } +criterion = { version = "0", features = [ "async_tokio" ] } rstest = "0" [[bench]] diff --git a/packages/tracker-client/Cargo.toml b/packages/tracker-client/Cargo.toml index ef5cccaa2..0cd419471 100644 
--- a/packages/tracker-client/Cargo.toml +++ b/packages/tracker-client/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with the generic tracker clients." -keywords = ["bittorrent", "client", "tracker"] +keywords = [ "bittorrent", "client", "tracker" ] license = "LGPL-3.0" name = "bittorrent-tracker-client" readme = "README.md" @@ -17,16 +17,16 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } hyper = "1" percent-encoding = "2" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } serde_bencode = "0" serde_bytes = "0" serde_repr = "0" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } @@ -34,4 +34,4 @@ tracing = "0" zerocopy = "0.7" [package.metadata.cargo-machete] -ignored = ["serde_bytes"] +ignored = [ "serde_bytes" ] diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index dfc83e58e..fb864cde7 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -4,7 +4,7 @@ description = "A library with the core functionality needed to implement a BitTo documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["api", "bittorrent", "core", "library", "tracker"] +keywords = [ "api", "bittorrent", "core", 
"library", "tracker" ] license.workspace = true name = "bittorrent-tracker-core" publish.workspace = true @@ -16,17 +16,17 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" -chrono = { version = "0", default-features = false, features = ["clock"] } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +chrono = { version = "0", default-features = false, features = [ "clock" ] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } mockall = "0" r2d2 = "0" r2d2_mysql = "25" -r2d2_sqlite = { version = "0", features = ["bundled"] } +r2d2_sqlite = { version = "0", features = [ "bundled" ] } rand = "0" -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", features = [ "preserve_order" ] } thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } diff --git a/packages/udp-protocol/Cargo.toml b/packages/udp-protocol/Cargo.toml index 31fd52af8..3bcde9a95 100644 --- a/packages/udp-protocol/Cargo.toml +++ b/packages/udp-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with the primitive types and functions for the BitTorrent UDP tracker protocol." 
-keywords = ["bittorrent", "library", "primitives", "udp"] +keywords = [ "bittorrent", "library", "primitives", "udp" ] name = "bittorrent-udp-tracker-protocol" readme = "README.md" diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index aa12f898f..828b3aff2 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -4,7 +4,7 @@ description = "A library with the core functionality needed to implement a BitTo documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["api", "bittorrent", "core", "library", "tracker"] +keywords = [ "api", "bittorrent", "core", "library", "tracker" ] license.workspace = true name = "bittorrent-udp-tracker-core" publish.workspace = true @@ -21,14 +21,14 @@ bittorrent-udp-tracker-protocol = { version = "3.0.0-develop", path = "../udp-pr bloom = "0.3.2" blowfish = "0" cipher = "0.4" -criterion = { version = "0.5.1", features = ["async_tokio"] } +criterion = { version = "0.5.1", features = [ "async_tokio" ] } futures = "0" generic-array = "0" lazy_static = "1" rand = "0" serde = "1.0.219" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync", "time"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync", "time" ] } tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index 160fe58f9..dc66572d8 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -4,7 +4,7 @@ description = "The Torrust Bittorrent UDP tracker." 
documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "bittorrent", "server", "torrust", "tracker", "udp"] +keywords = [ "axum", "bittorrent", "server", "torrust", "tracker", "udp" ] license.workspace = true name = "torrust-udp-tracker-server" publish.workspace = true @@ -19,13 +19,13 @@ bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "../tracker-client" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } futures = "0" futures-util = "0" ringbuf = "0" serde = "1.0.219" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } tokio-util = "0.7.15" torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } @@ -35,8 +35,8 @@ torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" -url = { version = "2", features = ["serde"] } -uuid = { version = "1", features = ["v4"] } +url = { version = "2", features = [ "serde" ] } +uuid = { version = "1", features = [ "v4" ] } zerocopy = "0.7" [dev-dependencies] From 1d3ba500e9404c971703f24a3e2132dc62486304 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 16:59:16 +0100 Subject: [PATCH 247/247] ci(lint): switch testing workflow to internal linting tool --- 
.github/workflows/testing.yaml | 39 ++++++++++++---------------------- 1 file changed, 13 insertions(+), 26 deletions(-) diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index c9328d890..83a290663 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -33,9 +33,10 @@ jobs: run: cargo fmt --check check: - name: Static Analysis + name: Linting runs-on: ubuntu-latest needs: format + timeout-minutes: 15 strategy: matrix: @@ -51,39 +52,25 @@ jobs: uses: dtolnay/rust-toolchain@stable with: toolchain: ${{ matrix.toolchain }} - components: clippy + components: clippy, rustfmt + + - id: node + name: Setup Node.js + uses: actions/setup-node@v5 + with: + node-version: "20" - id: cache name: Enable Workflow Cache uses: Swatinem/rust-cache@v2 - id: tools - name: Install Tools - uses: taiki-e/install-action@v2 - with: - tool: cargo-machete - - - id: check - name: Run Build Checks - run: cargo check --tests --benches --examples --workspace --all-targets --all-features + name: Install Internal Linter + run: cargo install --locked --git https://github.com/torrust/torrust-linting --bin linter - id: lint - name: Run Lint Checks - run: cargo clippy --tests --benches --examples --workspace --all-targets --all-features - - - id: docs - name: Lint Documentation - env: - RUSTDOCFLAGS: "-D warnings" - run: cargo doc --no-deps --bins --examples --workspace --all-features - - - id: clean - name: Clean Build Directory - run: cargo clean - - - id: deps - name: Check Unused Dependencies - run: cargo machete + name: Run All Linters + run: linter all build: name: Build on ${{ matrix.os }} (${{ matrix.toolchain }})

N#W8OnjQf$?#Wcy}JV5c&0xN-Wgx z6Yg z@^URF{r)_|lp@twFs;C2{J$dZzA40xZCxsF9`2v?&+oti6bodxkDrFtzo*Ka# zqbTWS)}2sDT1pvoL=R>R8%BR4O?b|Vk%gpmQ~J`4xDL*H)E=v!vY08)BTDlVLey;g zktyfL7+qxs^FASsR| zf5wW!3=};Q(J(0NHZy4n_r<{)^I$azz4dcxhrWC!(t?o`Zz_)_y8peyPU$kHhP)r2 z_)7u`#GRwOxKAXJNL)*uQC0g0oB@-g>H#{t6O7Q691(7GQ0kasfgYSF9^%OyYD#GF zTSonkGf^oBv>8MfBndLyKdJxlROxe3den+(g#CO5&M`(3!htV(#d`NdV}eC@Z4ms` z8()oBLDq6$c-h+Zz0(CQ{~t>fT<2lifRGB0LGTG(b#$qT#N&rS5S@8IHD1jYmb~lT z#EA+S{ckX>sZb?BFfS|E-rVQhqHf!8T?ihdkylP+>Jw>}7@V20zbcPJmtCn{j#oa= z`Xr?+Jx;m{XiAinKyml7_A(-x``mVlD^gvhyvH^QF=i`Ev7Wn4YBNe#f z068no3h@=!aO{c%pCMg#tfmG}iKc#S?nsg{V6OPVCg@a|^shs-(*eNnM7x7xZcNxamQ9=qEcsy;e( zehD>LgK;{55N@@)ue|3z(}}3)7mKZ-f!YU>A-dEf?RePPh@luu6ns1Ws9M2rsUg_mU-OcnPrTNmXq-ebzff*>f22BVG}X!EEh%kt_e_gMAtJ;{EhqP%vpIzY4V<f3h_HAMnZ&9*;}r(c&IY_Z`S$N{aN6wSK;{CK0Fl1VDZ? z!FO@~Cui~``ITVZ#u5T%?#RYY1x%(F*jbCqrUM2u*$1yI(ePd=Y3$ ziicD)_C20|w?~&_iS<90tGcN^8o4;hZs|4r3o0tKqCBS2q>pmHPbM&m${yqiSB9e^ z=m1mnwczMp*mf!y77ry%Wx{YpeG;s@G$Ck?Kh%lVOr7lO(nR_Sw!3aJ8t@Jf{uq`~ zBz+F7XBX^I)(@(=1 z*p^*8oEIGgeD=0tTc8fc$Ob4Y1*pS2OZ#(W$=vUM?8{#SuHHcT|EX_z7@sg-(i&x4 z$#|2PWRojA-nJ+dazK0IN{R%1uI6I3qNf%vDpPi**yn+DXOWeqvj6`sKyEH`qTK3g zg7g9OOtz!?Wt03-{h^pDv6wBdP{1~0@ZrR{F-z;rifoIhssDmHOrGNlkIfcwwt1#t zIfdJ=6AHH2QGZSINv3H9CELgs9@yMh4(4SlQHbDm_u)&gA_}55b1K{Y8g)`Qnjt ze`V^O5J5!uo)mNHA;%Mj)g)#H2W_q|KgfaO?86~}2ibx@9rH*179M6{m&66V~ zt|Hll;sdZ6SI_rbXe**7HBJi?mCAgOxmHFsVh<`U^i4C8#>qY6IdYV{ERW!Ec0%nB zz@%KlS$BTcVKy4qi_z)||0f~oMJtfZ%#*$}^i0iC$^7r8B&&A>i#`O2!2r$jc}y`J z)x$+dcCHMgv2Xm*OO9f{(Ob1Q_XCrjU34YWf98?`(q!xq{U-yS@c`FUt+5xtYuD-Y z{>H=-6A>@1z5~FY zzC%$yT6sS>FKp@?>U8KEPqV}KcrOS=;kGxX!_>l*5gOb)t(bxeG- zGizg9B3~Nc7J@1xJXOUBc2R#by-hbku`Qc3j1z$dPX05OMpU3kY&4yNO9`-d@bvmaw?v|x!s!U#3W=U87ll0wtOx$MU%Q56%|*$zmgfiCZs7fAMjbxTA|aswC$)< zt`xm`nhT+hN2yXDt--Mbn?8-o@2xO~b^=chpsHC(mHE){%MhP%B`Z*hT4d4Am?b%7 zL@_042WV)xk>g5Le%gt_BQ@lLT;OIQ_`6iWBn)#8+7uK<)efn3Dj&%r87d4|MZrTh z)&|ZtTZM57|A4#kX}DI+zI705`iqnM(817%PYC-%3p9%dI-s70FbT_jy7L+(+W 
zl{m!8Z^x#r*DG}HjfTmlHm#!LlN+uIty%YhY=@qU3onKYp%eMJLd+ai4#jW4q zhA7#k+w>=OGAfIl3{~0 z8BjP>(#V4KOAlfPJ^?AbiYuzVX%e9(`J!iu_S}3-w)xRD$3&6y4jtbWc6>n3eul(v zuOjyz)w7D1TTU7&)EQ1za~XkFkZiV7Z2ObCF08}D^HKnl#87yW(c$6e@)zSvuPe{# zws*iC1X`R4Bujd{cgg}ZO1gy!B>^Fa@e{vXJ(1m!@>G;xU5!#PXbI4-$WqzGEK-ONyYB6y z<4RZ-O=%?GS2YF?R}D^ULP2X|ErG7bwZ%C-^Fa6H$7$ly=Ooy2NOoa9@rGo$^f8Zi z%AngPizcs0;Ix{otqNMYhc@$2gAHvx8HthCK_$4pCl~?{k8ExNDpykC1h0aw+dsRK=6A{2@GQHb46Nr*ou5m*ATCFG5SlVUv?>oyn}04bnAKT z6(b|LH+8=yD4ikN)GFBYoSg&r>YHms3`dB{ZMv3p(TFER_I0kNaRGJ(j|a=5cDP+i zzs3&a;WCel{}Fo#VBS9Jnpg!zLQKDe+VxS`F%7CFHV)Z->xNe89+B14ADebK+O&A& z(^MDYjx||-$x(gPZi5a>Aqvkv`qY*ztLUn~7o9S)hdOb89UfD!l~i{sg3BQ;_^&hB4CqS~7{bNOZJ;uOS=Q zSEkmx{{-4`801M0-6HZ}&CN_^=N(LK-^qq7COkv$N6)PS z4gA^!vbo!+ElzD_LNb4RQ-6@Q`FHpy{pLI3-It3*w146dS>LuNT1&0cz(ea7K zyD=Q$H~$Zi*q#}Bqw3Id!ECi&b_;zPmqiuUrcv#sqGEkAKl>vkfUbPtHtz14qVotX zMuAD&o9k$N<(43d%%_VElLOA5Bqim*!r1PDxC8x(M4k~dW zR<_ZK2``rCkp5D8?G@GCfSG?oFMZ>J-|48m&Es0MDxtH7uZ&=7DsH{tM$ZO~TqE>* z;@`R8-<5xSs}8A*8Rx>v4Y_P{uzE&DQ`PRHSZ5wEMDr~(JOSPj&Qm z9MtF>l?YZNmeo$AoMg9Xchs4e{S@r;%d_4LwN{EXL$`AtJF!x`LdBv)&abB=YJ@@$ zoReo}-K(e&`eAD~{$O0iUe_tYT|Jd%piZ+GgmSUbZIzOP0<~G#0rudp1|ymM5Qy1< za)rsYH@=nC`VECVSuQA{D3#17k7kJC-h-_oDyH4 z2iyB!9D#a`k3mVb+v53mtNCX3kX0Sy#89q#G6wRiB;bsCmhO13oXH2e( z`sKaJnjJ24bs;cs|3uqRh*go^GT%E0y1YJn;bh9RGn3a`Z9B9BU1pa93%=it*{xZu zoB=&YPliY)`1w@_C8l+%3Z?{w3Y3_+7n;uuvZ*S-pmK%_#wem5OfMe*EWjI31w|Jx zaMmnXl(x2mhfY8F#KY93_{Y~l__jgFleSe6*D<3I7ZZ8OD)9h@`Y|NM`G}1KTd7+r zJeUOdxkb;EyVtKWDlgX=VsQJLMW!-Qs-L*83i68BdoRJ7>0A^Tsz(LckVr$ag!3z( zHhFl?nD6&tFZ~*eXq;(*3|?T~Ofzb%0ev3Mczq{cg*n>&K7O%$u$!#tJ9V{^wxCJ@ zQ~QK2DKHTe2kn1WC2jANv5-@H6eJcz8C0Z92TX69hi_3)-YWSE_96ZLYws?E{*+xo zHPR$_)PnSK9-`s6S2im8Kqyb*@_k{g2~bK$ln|%<3pEBewVd z2z%?OD5Gz0d_WNiQE3!Vq(NGgM!HjGU`T1{l1@=N9T+;KV}@oJVF;BTO1eY3J0$%* zeDA&Qd*8o)&swbI!kNQ!_SwBZdmr*#HDFead7IFzdWZ=y$N)GH;&vUgyX-&&GEc}L zUjo>7wnQ?I$p}5I`@ByL`uV1+i%5WjL~0xj;;C& zP5t-lB+g1;XsI@Yb@JD9)aA2ptf1mj72459wl4j#vClViS|&Wf*d!((A0TRUce~O$ 
z55~zB!lXV<&+R%EqN4SRpZXiOTnW8!k^A=)2mpHi`4@mK z|4$@+?c}_daH?;SFhuV9C5Hmm-Xuo3$&kRucBZTca~>HI+9cO8;N`M7qwIg;6f$8h zOI(!c>!tlKLp&S(@6cg*TT?GFvjs+}s;{{;B#IxGuuAsLxS~#o^(h*tEo_S)klPqz z-Ye)z;?Mj|6iMQDDp)fR&q_XX868vZM5~tuPPxc08HdqTH6_U2{kGp!mA17+3a{TH zbQe2Gb)g`xZ{sEDo?<6r5kN_%8q%S?$%-4BSOojgDlX_5^V;{ExnvGlOSh;I@p9__ z2+wLOx|WKtbhXEU*aFhXLwNrT3W7K%y)S^D9$+CivBS zaCdC3h{nAX-v=43v2>eaBc8FGq8e%v>jcvLvi#T@4v(^e1`DCHaFs{Pjl)PLPW&nHP;f!SBoxGiyO z6TSspoy@EjPRp$8;MN4?UCDmT`b>(I)J>_%5G5&kstQ@6RC15=Xvx%^S$VYk-Q~?m zdT#k-;{Q`Y~9>ufOk0kDP@l5c0PuK=Pr&bjR&; z8`@0IAn0ta2=+o+5k?$_OEstxTPm}0J)IMCQ+dqGXS9cy#u4}PcxNE0VKo%<<`Cmtn=wimh%>I~v(Y61{FJ@-VF`FL}qtBtEf}DXh zH*fkxA}v=Fp-z)uTna676GC1|Z6ordB{XbeJXd;3E!HtVOmH;$bFWi3vS zw4~x?B6W#hv7!8;IzGZ~qB+}m6E)V=Pti>CN6H4Zc2f288G92ECs88hw1h4H$uhks zl=?Er(F!zZQ0g)J1FcK!{HiGCL|`5Tx0>F@)FWqQ?^XgL77tIduRq?x8(vaer7?5b zGk8z@eoVM;=>@I6At?~pOiAl z1^1WjT8lk@+^~BG{C9yEYn=}b&^y0jmERmULxOevQ)Tq#`I8TrwuxqoEEn!qnDN$7(ZLHqr=rKQ~C@ zpw+(mE+olSxN&q2#c~_|VpOPaeTn}L$;S-`U?=08Kv{MUh0s+$%P=IMr>e;g%6GIi#@F&zsp<2$195ke+R&aa8;gnt4T#@@`tYW{$>=sA9h zK`7(=o);D+`7yO-QrfBo42X;OcPfc6ajgz!tgt=0hT|0dZDaV2<3Um3MORk+r4;C> z{oMWK*l?x=evvUTYej08b&4r$9)|B~))AJ9r~w^ONaV--u&5A&ie#EG| z{@ZwBpk!!VO=ct-+Aycx6%`E_}0ig z{7cqlf_pwG6wi9+9<#pmVRJjozmY+Dh4^$tSq}rq6Qzqe*~dW|Pgp%R`1|RAuziMk ze(hwMyTF71=!5k5^P2!^NmS@siu0tM!4SpNkNG1T7IAAv{B4n*l^aRTt%cJoUvMJUMeWu*Ubw?d34b|F@kqS60>5bXPc=20WT)MrcMlV#12PHAIbsF{YAzh^f0!5MGfO(coPB9;JXh)-;zZ3r2$zVrRi?Yk_W-!uJS4@fr(tO|bPK3nILX{E%6 zb%V{4ozNA`?BxteEUs*`%%dI9$J9BwKeVL3ZP)8CKa9jiP@2p)D@>)aZ9B#r1BD-@ z!dds%Y`^YH$J3Sad*ji2gK6@b%5?afMntt#DdNcx+Dr6^`RYM;H#1b_v^f!9dakPy z^zyr$Bb4-8m!#Iusx$)36FsHBGTq+BVd4VDIlB%cp2AsB3q*2Kl%Kx(@;CzJJ_~>i zCpQta&P&{zo=RGM@ewK~YlgAXQJ|i>B(LleCjTH^3zQ5F^is54^Cjy~u4a+Rc--Zs zPr!UE*>+W*EAaNr2j)Ooe-mN6BkhCaN%ymJ{em_Aix}4?R@$eH+6%~sLE?R09*<%y z%zA*v@WsB5xH_UJW!RKu=jK@hP$&B@n6;Blb8Er>kOt8($j6%jhutTGr+!Sf%#A^|?sTerkA8c1uJ*S0C6k=lFJexw^e;?vy26?_j@pru{ zjm_3Ys{S6L9e3wC>Duw6%2BrZZ7Xwx%Y9C7IJpmPFUqChQ7$S(ytEIeD7>93s7#KN 
z@%@gGr&zkjtF<0>di|=FSGvP6-M;q}wGM2AqB-%ugx8$MR3}r=DG%;<2B)R}W3!ft zgkNa^AR2Z*mm3dJa{Bd1t=eGWv+XWO%*U=s_1?a2cvTvY14HEK`_XuO1#plCl8)V+q?7tf&nNR+?t#TW}3%72^s1}L??`uA+Hr!%&%;nC4 z`VW*%7;3^uXz}QZU}(chQ<6V%DsA6B&TaifZGQz|9}?E( z2)SanEh%Qo2$!L~7oVBhBEThfV%ARa5u?$~DhRnUGdzB5v+87z5MNmkog67bh*z}R zk@Q=~&r8-&XrT40i9g|)?VlzI4&C>xOF=k#Zfd>6~LP1xK{Ml42EpJh?772m_v&M;c8(|c`02EqnlG1$7i&4LPFgCcYuO{l8i}_Uw4QLED8@%y6&OZk-gYAziWkM9L zNKlL!=}7h@5ZkC)KY0K5O{l%w@8PUU+J_gSmVe>2?N zqW9m6Me&@UZy0$rw!j;1LX93E^YH}F3#~~Je-VNRCml`ieTDAFdW{4Q0 z=s5}B8@5vjobYkRbJ1(TC4u~gefuL;utMK_E{wJBhvR0P{RPHi0b{$K$9WP0x}&Z* zzMR#0;4vH`?lMH0=2n4Z+fVs4mGXnxXF{s5HcwM5tuMYad00P8G_b_*87ulnUkh*s z_xV`Eoc|o7`}&533uti@`}6d)8ejEfLs5~*{w7$PBQPPkSzNCGKaf2VkCRAB@#IMO zRdHKx(s^X|b##h(gDc_q3w4?0P}4yTmRkuZz5ZmG$cg_U{Gp4|N6&)dsYOu9Pl=TU z9#MJUzi64*uj$*57u@~H_gHvFQ9n1P)|g%f`8AqboWFN>B%%2P3_0KY#Ta0sBmH@_ z^#jU1eZn-at%{<0>8+X(K6?aZYPn{a8IGsZ60${%lPv4%jis?8m0dlO%3_|clmbQx z;C>kqG@q=lVY)x~8X%kz|2=L4@2m^!KKznFhWU)2G1v9gqiWd>?&jp=n;7k7CfkD$ zkyx46tNN^(W8$Vp9aZmLBmD>snUG**%qbR6*+Vu!`#-hq*geaRsB{na*lq9DI)pr=RaeBAZ%{;lfy zDIYHD)wX5A&_>PPc02b9XwV1djS#)OCH*bi<2_NHe551I@Uy?_rQrS5pR$Sv39EI^ z*(V`|0K(XYJ1E<}Di3kX^Y8nBqDwdTj4uGTSehHeF+-Cy?YC(ib`Pjw_7eA^R6cxb zem2w3gUG*|m-}X|sh07NrXx|I*=ri|^oO>(Yyjn%uuy_mCY&8iEn!|PnSn(#v*8(b z+}2CM6d{y2rLN&OZ!C84+#f7)EVPOEI1jL?UB69=mO`ynF~)w=f4(7QbX2m0Y*x59 zGl>5tRJ~cfl|*xZJ`gZnNZKLUP`*w2TEmmX~oT7wFj7 zyPxzkXr9)uV=T7UDgWG%%=4s=qdHM|-vSIg{`$EQ@{wJL1i)zomQjQm>PhvfP{rCzbeDpz~)!lY^8bM>*{wL;__LlKf&Uhh05<@ zt8_?7lM}mAeNWuUb!qt-;ZdHY6HWZuhkuxc>Me8$AAjaA@(BEcU3VjRkhllw<viJtPhvj*4|p6j1*p@=1z@zN+tw1sLWH!(-Tl*$LrAE%glrb%v70)S;ukXLxIVB zB-@%)A7AJy_F2B;vccmh3%$1w9%MHc-m|DDCy%=g#X1RNq14L($%Zc|-X{F& z8*p%WfXd-alvz%!hcIe3n>bWuC0yk-KONb-#l)u*YY`WiQFCoSx6%U_J^BVS&izS} zRZ1)aXL+*CmK8l-Hay}aMJ^_^7RW&$9Y|L(R~e!6v7+|LgXidqjWf5@CN)gI>lq$5 zPNJh8vL_`{4Q; zj%zx%{s{7?4=_^TW%7krb~uJaOmnC^aNQ=#g{(nNHRS~FffH{Y+s1mI8%;TfFSV7f z6;WR)gy+NPHiKsj6B{Xb`d)l+;{9@+FxK_gsveA`X(xHwpj<){vG=(qAkl7sT{#J= z_*(=6lyT@@{2n7S1F*5Jgbb@zfzuq8DL^xZF+dCb&(qN* 
zB9B*-T^<8Ed;hCSz2p_+9Cy76fYb%(@2(BM$9CgDu|a2gt?Yy{F zgwG*epMV5vQ&_tqkNommzK39ndu(MeZDKRW)RU+umjG6cmQm`16bkWzEQh*O%8;KK zyb^BrR8tAJxY_HcRw4}5158dW4Gk}U{{XwI{8Nd#otKvXwdT1z^V=*AF!5EKW}HT0Qf-f>Tu$aPe8pOXRY>$>Jh6YkBBsL_ySZlHfEBDKg^659lldAq7+5Qz$Plz{)R zycGk8`gD;k&YAa}L)kl}wY3z7N_;ET+M93MyHd#Io}`{=W3t~V;vh!Vw=1PBCctpb zg(}|+kb7WG$8oi-vJWs=YlW!fNjt71vYp|659L!5*0rv>w?E{1aNrsU;zYazfafs^ z&f9wI=l9iTG-AR~<9!h&8_IMawe#ri;!d`pHBUa5Q+A540nycqRnK45nU|h!iq0cv z8EzHl%4oaRA^nG<%@50`lE}`~lMG4rBXU63etWXq_ZFZw?+%R)R2z_3yA#VxOS}+% zDJIy9xumFnfhL^4%0t0KtvMm}@b{`x&2nb5RQIn`uXTo<2N}8$nH}E zn;&uEJ76&W45g|MZs@k3{CMBSD?prCbTVIakh=$O}|0h?$1`M6vyZ~#LZ<54{Vuv1g^cfUh29vKBB8{KS2;yf1|foPWU56Gwr0)x|kU8>caz;`1b<9`r^1;IBh zhMrvr&lk|ia_ovIDy1k&uBkq$(u`xAbp*ilgKxVY$8D?7SSjn{&TuIRR?eO+s6LrBvSt{!I{Ho~nDba2Hstc|E8*r3AyRtOn^}5eT-CrWjVF~NC zE5PE1ei8adUtBZiWeQN4<@w$&QwrvOJDn|?A!D>uZ--1Ed_?YS$CV+^u;b+Xqx+LH z@=Fj_|B{+wtSn17W<KT{+s0#}xLdpK68h5_I_ zFEA2cN#{S_Bjxt`#R?m-roD0TY=sAt%J)nbi0FOUWa6{5`Z_!hreq`x*OAv{Tz(Oh#e#yF1-&W#8 zRLM{zrSqrK+|f8dTUxk86>sc5ei{8HkqvlV;BW=RuLEk*8^3yiEL zCL9AnQH zeB@U=AV-S@rXx(NP%XFtzT9rljSBRB{4EqZ>n@3X z!=2jA?)sGylAUV&#GsDMqu?m}AR|-zA?|uNiKvM3_*0;W#l*>U?bPKl*)A;@7hB&`vdHi@&Y|RJ2dH6P&iqNl{gOa;0~Y~8ENzRi z(j^y)>5MChjF&LCMc-2WH)!^lqZsEsD%Q@f2-IXg+f(ufnO$;PW7?u=JSBEQtl5h| zbhFPz)N>ohzn7X|CXfb_vwIxUy}%_3RLpIFt-MWg0nHc$mkgJK$G|4c&p~YIHkI()fIXc~pu!%3Z_0RXLQ>VU=?;_)L;tDo=;Y8U)*JDS zSR2}Gv2y)#)M#I$Bv2J4rOdFF-}$oQ*JBuAv5Dz!8W?5!X;AhklDFNA#^Gz^#Z!56 zHYU9+cjmZosfZl5c@wep4xq9s+#GE^1a!Svl6@26bIcylFs>+O$^k+vLli21Z5o&_ z3HK}hHsuy=B>?o9^-hSj)dsJ8T?gn$Zn0o-5U>N-NkmQvg ztkXgz8zY{)iR%;b7oMPF0A-4Y*lbD+qa>|TKS6j1^62| zUtSOp5NMFd0uXVUZe%haugRk3x<%72U20j)Rv*zs3=Vg`lgr$t&qD>dApkU=>4Je zmq|ErEJqXv6k%(=n$H2WCLeDebz8>@A2aoK*ghrWWUYp9CQan6)ow(6q$I`AqMr2h z-f$zo6uuk~sGmIsCnJ%+s$E_B2gMh-$}ISWP0d!&U0B&FxOwR4wJP2q(v?Hbq40&O zlnEmyzvCw02Dm7o5+#M*+w)zWunRH)55pn7kXpXW>!)sq?>}WH7zc$Fm6#XrxE*@D zmx)^0@4JQ=M2TN<`6rd>9P)9#5FR}CV1#ko^b)vblTCXL*#^ATnHiIDk4mcMAC878&Vb>QQf!$iW^FrvVsclaS+1&2MU-(^GUT#QLEj( 
z$1^bzM~BA*I9B=pxHx--V~(R(dRd|-gbpuFeIFo3>w0zM1W`dh%*^8997~YXuaj%~ zsgi{Cj|?oRR94bnz8mX7RCauN@!Sobl6s>50lf5-pQ+tKm%*yH`?I`sFLr`W{Ra+6 zF4lnp1y69Xl84QoU92D0z)IU>zHZ1sd7}^gE7({$Wv)Fgd1A@9oD`4 zB_ee`SD;%diRXTbxYa~Ij+%ogdsSD`0#~GcH|rS7BB3!@orl7U-R`J6UotP z$xUq2>4E$=VMc~m9(I(iz1$MGOtzQy15@+y@t8M0m4VPxOIZ;8>J3JhaqML1%a3iP&We12eR6I}BCr=6E}u5BVmSwaLA(UqaVv_M^YIcb|g zkqptXA^XyIy04jzs0T2pO@tJlq>@lVMtE6?*UvA7t5h7NQ$_M)==!|#@*PI1DZ{W{JbGPYVa1V%jc8s`9r(7A+450N! z+v%CxiF`>1?axod396C?CVSC81X^Zu8*+<_H}}>mgtc_hrK%o;hu-Reel!bP6`irW z?EBQrZZ7FupSVEdAH;8lI5=6B*4*d4Si@``{xw%uwIrznbCuAqi3|+#%2dTH z*W;|-gXj0vVuZ9TYU)9tN_^kDjS%Bj)djniwt4y$ZlXMGMXOlCy0R=L#W4+*GSw#& z=D8hMJSWa|qV@xpoZ=m0e+#gt`G zQe0py0${sWxwd6yo{tBVIT<)9SfUW&`6?a1h~Gukx_`= zt`=$XS7c+Qka zvW&I{d3BSn_WETBSVAd@Js6m0lRC|;M1yGk%?(_O*oaaZXX$*YphI2hdT1x8=_yMX_QpU943@w62DD(ne;_{JbEZK{Dz(@Zgw1K;5kYUb+n zuTI3+9cJLOWooYsR6MFqn`(Vb2eF=0L(39WvN_S<*f|3GVt1Fd!|SK8*-eq|nX0B3 zPrNdY1y7ftiWX_oMM5!2Qv*J`g5}fjQj{vQb0||A1qQsXJ+;=RObUm4c)PvONceyc z>iP;_DlIsY<$2aa&=C@2C$g%i6#QXnC9i~1U&{cRVv6E-HsE>IJ$I zrv+*4+iKOx-aESN#h&%p4u!$%8-A5LW@RyQny7@dP-|W6r8UyT+6sSYhL@iweb?^J zta$aYzChzU0?Tt zQA&ZhMj5zka6YZlj6S*c;OfKXvaGx5O~oI415C4Irx909Qk|LSi!M@uRQ8A;y{Bh& zX@54EFH`2Ih{*wY*=iJ7AiG0w!HcC^dw__@i*YqqK`bT!Yon|KgL*;v_rAHsR^cl z6?lTLXTVRK+6DqmB#Xn|AQRaIr*W}bd{V%{4DXoP@8m@$>@UmqWCN&d)xyB?$$`k! 
zs3M53Wsx=Nssf%6I864CMZIxHtLlfX3WGxBVH4ni*!g5n+L;LFZhmEiE5Ymg8@4_Vw$2PqRNsEw9Y+(61Gq$oDc;KBa>D{&q^K)uRPau-;i{xx0ohD=3h>gj=tEbw>C68JO=Y zGM@9Bt~&jqCq77XS+8DGe};1J2QA_t3VzBAnUYlE_y1&29pLyr;3o_f;sQaqryKpY z4uKEo0Y3gdp(Eegm&bynn#T0>sY|;s#T#p zJVxQ@(@ zU&XSevqL2xqo#g!E89I%Nctyu+3XNu5#o*OPyu#hqvp(Z7$&PQtn8r3%Cd~km-nrE z&%q`zO`@S*OOCH+!@LoF5Ju6T5>5d6-XJF4v%c7VSEb@{mi^6(BYJ(6=h?KAp+i`| z>`}KnoCG)!z|N$$RyBMZ&_Y6D1dp=*+11!`X?0?6>Mqb6A9{|Dd$WM+?KG|7JHG4D zMCwtch6$D#3|bvy{M}>R>_1xH35tmc96#)x;g0x14H%QUC@Qji{uL(Z!;wB@(0z@D ze89mb=}~>dDYgOIAQSODw&jXN9^birUBk@w?X~GK`Vm_q)*4#y&G@O`@QPQ3s>C-7 z5x~28e1j@3#|x`&=RGZsWu<7;*7h1=jw#mfVUODYfqsY~BKTj&s#?75rpiiOk6$J- zz?0`hwyjxB(*TW<)*DnW^h-yyqgTNWNXT>k%PbSX6CLs>d-h7R1M zn9J;_wfM>*g`c%Ozv59sv(Zp@Thx>XL(=VblL|8z$rtuoqrd2w(vPY#{-Rr80vn^^@jZ)Njwj@ojeaoz}1 znZ?!fBccv_`vpWweFig%huirdBl9_2y7>WSl%LK&2AcuzuE_vsTbP4}G;-yX`HmkV zMO~3CmMZQ3P+!%Mn?8Tbd>KTzihp-e|7KiSE*%aC0^}p9l_X=r0PT_|xre3uP(Sty zV~ZyU^sTZlLTu+_lylJ4eL$65%sdB9t|6)7gKg{(JvhIt_m9wYZ~>ezRvcdd(;`E< z_%d5Et#muUGDi{6S?`Xb0R^1TKSz?o!RCRbbV>yLzKyMEe#2#1-&>uav>UY}#qJi!6ldLa#MTl1`UF~lG8<>9cpsw?K zS?@TWSK{jy#V>9q#rIiyCa`$c7+>NAI(Pgz3cYzbtJl3;WatMKMB6Q~1 z3}clcV*kauOsUyXRs4Z__M=hp;$ciZ7Ud+vW0+tjnjIL^AU^>JB$NS%2v`(~;%|)_ z5&p~XwXLw{M5x*KB41XgEi)c8$?m>@aYNNLLC9)wXH98Kc1y{;Dyb##Cxc+^wojdE ziYY^`s`*WUR2g|bo19Uw<)6y_v^ar_x-JJf9-4!t zT2GyJf>Eo(-klmTZPk9?wUvn=5*GvRJUU@LCj!=XcJ0;56{-Z5Z2*h8z5Sm1wwr&$ zfT`dbQ;RDm7GseTq;2z$h2|azlz8_s%jea_6ws$%YthV>XH0E2mfON%4)gXE_8>NO zdx93#8ysN8{up6pa(u|@mL4@e!c{d%Nlj@GgL;B!z$k<=QK#?c%Z9KTX#+w8c`iMrQoL`i2w*%HC{(%*+Fw1%>%tvrUPxol09XPwB>$anSxV;9Bit$=X@d>3a(^ zw2xbPP49z1NLN|nJ0NEXh%1pEb=dauo6xUJR3?)DdM0Q8tn8E#QgC#Q_Vty2z5@dB zG72)wtqo-B`Wf!x@bVFZKy1q4rT|B{+%5r%%)9E~)#mQz2R873p0`R%o9nk*`oB*| z2ykcA|9XTTz-~|I|2zw?6qEkLZVjzH(<%}aSIevN5)XhQ zYD`Sjk;{(L_A~S=k7Eo4)|Vsr&kkb$YX^z*)bdznXh%_SwwYNjJTdXZ_)p+NDO#n8 z04nA03eCc!R)9Hjzfc3OLaxxp3kbas)`^&LWhk_}uMfU(`)e8{r3e7|_FvQ>H3Q^U z6CgA2wMDiTzUV}iYJg<__t6}R2W~F>Ur&+L|NDWAd;c#jou4rE{I8FLe%So~Ef9*y 
z!w-TX0-8cyc~;8(FK=9qHMdL81-U55p5~c%#u|?9{7pO?6dbQjWP6 zRxE>2FV!iDB%z%ta^LzlQqe=19R4Gx@4-)>)eS1a^Wlf6fYv8rF?sfr)oHxDcdo0< zpb;4AQOtOm?a=oU`hepbd&#)d-_1s>D5BjDci;03uDPoXG)KlpjAe;qXfM!sL9pEK z?Yyv&d>4VHv;IzHHMP-iVY?&e>ql1e&F@}FRE7S%3yKU~q-_1TIUy(Ba0Y3;7vsLz z=Hf8ZXzaGu*KFI#sbDv`s2}5dBp&0tHA)J9xwNrSGH&9plI?#`6##A6FSY-ycL$_g zw@BWqn*CaVf)j#uy&N*3vcaw;5MIp@4R;k8Cl(bI-OM$#56V;_tklxC4ssY@`L!{M z(0gp(e*G>eLnB8w30lADnJF5m-b`^dlCLDtbb%G>JKym2f8(Yf+POnAS#k&eti(RG z+IFrHPEY-}QmiL&@R-KL+Pg1|o|fay8?+dDXJw`AZ0=l7@Q&Sv^E$(Iy+=WsV7+`R zeW0?t?f7U>w!a1X45~L5=vmFQQBNhW2+PfVLE*9Z{RQKIh^t;hmANjQ%K*X&x!(Ql zV)y{@&oC#JPGqaWuA+qlXHDGmfsS`J;Z&uIL()PIB~s9COxh&iBw`l*0MwPrXilAe zx+BWIX_)WF`M2avxdni)MCzN2dS@2hdG(az?r14wT(D1nJ>9A0BOp?aC)u3G>+z-y8-?i=*h@xA9MGE1 zdN;pHpb^slau{;hfWd6FuXV2Pitdfa(ixycEs74l(}~vE_G`>P{de;>k7uMtA^Rnc z`Y_8ln^19+bR4brtjLMm?5TMiiSGNDv?u?@?54ikWV&2dc4lku8KZ4Cs@p|emn+5R z3R`JK{N4_<{B3D`<^`X1mZ|*JI+FUBO=cD_6T7dWn- z3W@GIt}vGQDMhE4#QuDd%ni^3 z-@M!`h==#;L~Ff!?#0kac6@jE{X^m7c;?y0mQ6N02geY-Q&pFd*)bQuxx%=LjfJ{)rZ4j1Z&FuYk2q_-IV88Vc(Vj6IOA^Q=o|bk6AD_ z`H0Wa&P^uXabs8`=7}I!R`z?H<8blh^i0^UgTimC_aci>+MZ%a0eH??&?sG~kE(zhxfICsVY9wtLOu7p7w$=>SfIErZ8Y9uoWMjT`xX#`<{MOxJOx@wugGxem6s z%b`!2jOau?AZ2OpAFuvumCaYmyR8j;Wh%(e|FAdr2xaeWbo%Udf**vMJoy20Kh zG-x{ub59v&^-t&>jGT_F%p{o$g7L`wc%|H;3VQp8qnEm)P^*>YmUu^$SH-|`B^nh5 z;GE!!{%lEWu@2*dZU=uS-J)0{of<2L5PIQ!)E30<0CwKtLD^?gE~m1(u~m|-1mggg zBAtr*$I!rm47gcu{gjgVxW#qn_;B6&`1j|yQoxK(RkMyK_zyFk}+w#80o_C+&v| zVwN-QchecGIG5$xhx|l@wiTG&?F8NA=vFnJ{|Wa_nOS~C6+g^3?UA{bX2k`F zOTPHO?V+=p!mZxaK;~;I9$7g#4nX^;U7iwMSAR6k>OAc&w{ve&|L3dx`0<|(hp1Ja z@!$|igmwWIkm)*+>pk`|{p+w*p1<8*s(|Ke*qg&2ZS>}BZ}0f9w0e>z=o?B(Miw)! z*I-}uWW=fX-pSLAh(DhN49CARJ$(2uhWZ9sZZjsmf`yO&`Tbe{um`uy2(7Puv+6xH z^C-XB{{rE%R2Ug8hL~j#JsBNg?+}uf_SpRN>EZ1gc-Qd{@R&UYIXjle(*LM;(-LvN z$j8OSb@MkA-g8w}SAR_s2^*=Bu@)?zR}`>PzuwCYS2-Q1*fde2r>CF1r&Z-rb+J0l zgK`^xt)#s(c zbnee1?zGETQEi+icqwYs{8C(S(Hl@IP}oPc8cLt{3$1{&phL#mgvi9yZOmbwX@;G? 
zvBFe59dXL%&$6s_`Zq_6#?;r_tmn0!Y*n}43ilI1d*p%nd_tIljX5&U#79P{Tj}De zrk*=k zYmMtYZ};`zP`k0RA>^ch4%+dKzt{oM()H=iu!#)4w{%2A#Kq>820SAJ`Sam}zJ2s; z;QQ=Sa9KH_1VuI7XPQr+P)te;z8;p}#VrQP?VXO;_ZYJeC2&DP@}}*)uj`g}0mE18 zedGfuuHtu{+)#RJVTpQI+D5O9+~zb9IoR<;o<^x%@^1Y@UNTQlPjL~8l(BMyGRbkI zrn4r#`2G*Zj_ZF<4>BBgMBm~++#LsuNl9s-VAIv2o%{=DV;5M{BAJqp*S`5wOJIe> zwSDf)?92(a_zhrC@0bUYWE5y_`gG~ch_v<}Uqr6bHC|`hyd%BVr;kuA?J8?=Y4LbQATL@tvT@Zr+71dv*i4~KZjJBeXiWwE zvkMc}5OH4_CZ)a#o7A;=mX>fi(2siI+UjVvkH_BN{e!+!2|xrk2lLQVc%CCu?ueMT z9iyY*hZ4N9_z!o+(@==$JHG0TJ@90v_V)G_;jCu#v97i@%s60$MovCUZL59zg?n>V zdh_2+GNVSX11$B-E5~2d%&e?-fZe?KgE_Yay!o;lw~zyU1ms^%cEF7kEHN#u6d(Tq zALP?n<2fal$qhoHiBz%75M`L{cZKpf*+!_Pk0WF-PgmA-we}j{jCD0X5Oq-nykE%1 zP{FO=$ZZ&&oKM@cvv$_>C6s2_4!8N<>U?Ut-u3xTQpsWM~^nVdx=1l6> zQQ8X9HBL})nZCP{jV0l*8*mVb0}?}H4uMAJ25p!_+Uxl2c$%cPAK#8n9|E1N{!iwfBGaY1!VSQwp zQGL)d@Tv>nNc2yqNf}9|P8DoN$x=QJHQX$Z1%mk0sPniLGP=R^Z|FksZqj?^YsJ1&R&zGI;(mp z4LZhem7u+&o#O(_S7AG93-HAh@pKEa=gBqJEwY8*Q@g~Bd3Gi*4x4=TH9g%Rt8;!P zc7F1pVzDAIksR=3*)g$z|mLK&f-%GSgaWt(UFsv z@A{{F7%lz?kXgc9-Zj&5%;`NY==T?`4m{0QwWs+8wHdR9Z&%Gd)}A`SvQ6ZgE^ma& zpy^}u>|$5Zp%0&yjE$rCIHo(=+c!~fjQNc_hld5m&3&y_8hg3{2ip4AzdqM_z>LX* z8wBliOH4m^G}QY#0rC03cM`$HjU0K>-YS577LUw=_|DD|u* zA;QSpsMX-BV$#7u$+wB`Da2@T-hER>nb(4XU{MdpQ7RLEvzhs`q?=NGRGNg>L7ATe z<0=+sa=J%!p!0R{yk<^L7&(pi*msBF!jsLJ4HiM08P#b|dpOgz?`k3tP%sz3DPyDJ zz~`zo1se~G2l;Hw_fJlS+oPz!0+mU&_r|m>MCz_HcwPHDO*&YooRD~*OtJ0SK~bgoan+BloaD=DVS9S#8H6f^V%CeerdD&vT6T{GwovKY>KXW_E3VTfM2fw$$_*_s8^nIv;~EQ%F1?jye>ncH1toOrbAYL_w;a%SCPxA zs@7KV4B3sBCopY{TSf`Y7|#_2W33Z@J^-kltCT9yugs4C_% zE{twCzk5#w22@a%X;>!(B&tr{CJ!xs6R=iC_@F>aZLDpqAWeP>2yp_SIZ`G45YMaz zG;hsUp<1QS**0RlBYK>y6~AY6RM4bTNa^BCS-V)x&=7es>k;2lp%CSa73Qx;vlRit=8SVc(T=O%umT>q@4W@!T|PsMH6Mal!EQ&7Kr-V{Z}W4qfp z*>IL6b?ZD!pKgy-&j<49i?2}J+PulOj&>Rs_WyQfN2Yaa=)WoH3MfXoH7rJFxNw}hoBykpwLqi1G2q)`jZ=xjqxz8&q-#KJ_>ibd*QqJzUz?(7whc|x1Xrb(lC z#eso}+*==LK8dH#`SKZO(*MWYTSjHscG02_C@KOXA`PN+r*x@wOE*Y&cPmJjbc3{j zba#VvOM`TG_r7l5@B8-t&K`T5amM+xpD}pHJMf9?zOJ?AnscsoKN!zl@c|$6=k4P= 
zMyl^3vQwq{_JbM^PS1b$PuE#WH+DK7$xpYp1MhDrofHT4O20Q|d=3~U1IaTL=B}#a zk(n!*$YG*=2CE-E3NcpQ-c#bx zgz}iTw{MPTH{W)#)kmFB>3LUMXs${b=A}oyv@@akcX~2doT557$*rL`=I|N&w725` zAaM4!XKNBMX>O)2swyr>u9rXQ$fm2St22@GX7YvBRj+)kV-G7KAzihr@7GGwE4pZR zP-oL~;rl0aqh97e3dpZ!=a*I3WOQ_7 zU4eJ390Le?wH(LxDdGXKrQB?Fu|3<9Df3y>`!j8A#J%o&7C5@$eWUETxw$U3>c216 zehm-VSyGX9QA5E>KIZ_nvx$)iX#4>lc_UP`UlbQ0Gy;{6GS9&&wXcC`fb~?&}m!GnF6o47+q3c;)T}=fZ zGY?af)JK2%9RjJbAbK?-DH(bTXH$0>Iileo z!d42Z+ZiHWj#V}cq@YY}TQ(*wdLklXjnVzc=;ryR;L!D+jg1YV!Z^p_iDq*+!fZDH&PM!~}h>du8RMtg35{shUR7sM55MFQ`)O+LhRE zn5bk63kz|kb=I;}D_{tZ?T2*@HK0!^@HDYcHdr!;zTo$f!K?%C%b z?l(89Mr8it&b!8ZSn>qM&ej{PA9GXsF`3hT9GG&$mAjtQR8+ET>I;gEjUEYHmZ7q; zOp1b=bNNMH-dsO)I>bQVApRy_ejI$3ejPkJd7ALE?Nc_b0V}nc%H8qpHMNtL_9`0* z_{*bh>GALHi9>LB6d~QgcmMSV#1Ut*lNAf@+I~y=0X&*DFJ^Q zNlI-Td|{?iay5uid%pMhM$>f4TLj z3&Nw{^0~5EAo}mj2t5z}@M<)=sQ?t{lTEn*?o?z^QL3B!t)1Ee9AvBXtFelSZO6`4 zh@&>}Xs56B-^)atTnwR2SGWpSs`O7;1Ni~*nAP4`mg}zgKu)Y&McTsA_ybQ2EUX;R z*x8+K;D2y6mo<{bQ(-=%Ra+x$SyA&wxhs@lxbgOW!*6O7BA6$grk2}-EYxG*b1IDH z#az#BCW6Yf+d2co!sG;MJ>nGwy97+zIx7GeN4BhoeELI^KPa|uurz(MsEI9K*Ka^Yl~E=9kiA-#&3-p6Pa)s52a6Cd z?9$NEU$x3219%VPp!YMroV`PCVq?R5RUbpA_LH~4B3fPDZsnKu>Aqu5p8VNF5@Azp zlw*?K;Kj<-Z*!K5v@}jv)(=&v)oLJ1uG}w+BK)NxbA5x7jry|As(xg(n3Y$ny48=h&a}je zPsC+`KOI3Tg4|rlIIF3vgNPf{<%7}^s0`JLtzW?S4AnPzuZ{4Z)RfQ1{0=!WRx@87 zT$g*aCAe8?$@*?{16msk452xP?OniQUrILc;~3f-I` z>ty<9LI}#el0j1A(KQs@lV)dNB^vc7^9gHfcd;1#AwD`f8W|#gw0g}s(uT^D4Fsor zf$HaHguFlKlItL8;m40E*5jJeeG`d$bbwylXpbj4a(TRdav2+0u@Z}*@@fIZm#8do3<$XG z-pTP^Q?xz&;L+aEVNj#2*Gn-+P|DoJ8-93b?Xp(0SPLwQv9#NeE$3rePA8)#$V>{t zyq`$e1P!FW2HcdP=5L@}qJaz+3|iOU3^N)>1y-rf2QNUHHfjuox`Av2jt{d}6v%{z zyUt$R(<3|xl|+qs7#G|o2pMqHh3 z78Po^P=M}+L4S(&LyCBfT07Cl&-O-73ug2v_pj&9N8jpPlu>C+Lno3BoJp2xrrp^J z4a-ox0^Ww}&EYp~kcAK8A`6vlttoZ`O4Y^PiPM$haNM&Cxrh129 zja|`NZ{BcWC}(8vHD1H*PL)xM|C#%z!}SQM9+ey0hWt{=B#Y;^y$gq{By}m zGhprWz7Dt$AcE>*HdKn=I^1B-xt>2T8PBVrQp!t5ClA}MTJRi>(89z%#=Yd|)UO0q 
z7Ucdw@eEwv^i#awMyIKs8=X=b%-nkt7dYmV)ljEYm9b>_3djK6!|ptEVj2Ghaxg}XYPN`?1%JMWZ442vao)v{+c}3CKu?^+qs6XCKlTspZ@v& zruG=vf0oVt98y#mSp5mC6I_<=nG*|c9D^xY2LBpjcVP_~|2B=f)E>1C!7dtG3EO#;QZW=Kw9zDW5h(bNK z%L|aF!o)fG6~0?lgj@sxYiPdqP2zrTc}`A>tM=D9twhj>4ir3(xDO8To+e93G&^*vv zKVKEMpCCBTy*GE*j@sm*+rRJT2zZd|xLn@KyIH{Ja`>wvhJ3*o*Gs&pdjuG)1MR;ja9AY@eHbwR*9n*JBLAO9fBt{Z z4*lQ%25oc0|C$^6%pr-(Hk!fjo2%GA9T*Zae&fSuyqI1CnkaznZL3u&)Yz=r9sGf7 zIG$Hd<|15eM*HWL?60E#HFFi1OYRpo20T67s|)aFc6sWY5c9ZyPCPJeWkt`O`Br>+0l3|=3il>Z*PH~R0-f%K zg%(f8dqTSYJqz&6q^QA_|EYV@Kd%);`OjLE`nm{5*euE81K8aO?Z8Ra#0=PkW}|!P~Qb$J6!$Kj6N^t=fYQ zc&?5hA1MjR%e4dXawh$rV!qw4?#5a>($_kk(^%J|9(3v=R(aUp;wC&MW6wJ7%IVK? zZj0%6gKkxBqHgr!OZ6{4s6$5of=9ij9z{gdY7d65+;6%ct1|36BK&?LzY*T1qPhRUt-dFQ zHlqy9kl?^VEL$v^y5zrCif({*JdJui(m0t4EtBvp%e{mzh)_uwK+)d^g3`c@n(l&= z@U9nE517LO(<2Z+-<`V-BgvKCpE&`p=<4b!t>=B7i_N4+N2kDcdpEjs{XZjv@46M2 zsQ)$L7A25#&yxj-OJZ-ri660=P)&q5E#F`wsp!?w_Tby2-1XC2$K#c~wd_dpwv#P9 zdP>H_2yfwMf0yz{`QO8GFXf=A4Ge0}!L!EdYHTpKlKo8>8bgH!QHLQs`k#;g_tC=t z`{Ch#n~prsKTP)~bdv`>Pzi{M3e)pv4d*RRARoHgz`lxRXuZa3e<{p6H0P9>LFED*@}I{$#?DhP75PzpmanX8iTFjJz>dy&4--_S)(7TJKu^HsE-OdA(G3B` zSuf7|p5ObGLI~~`m$fPFgriFmp!q~@EgIw^<0m`zx|zTv-3X{sV9rK}fWt-^1X8@lg!3aNoJbEaWT_(;vNS@^=HXx3Hf*lyAtKjKrN4=rS;G@i;%p|J}MM zvr1@@FhPwuDY$8t`_CDE+QqJ=ePB$&bg!PHv&nnLVF-*2MPvd&{}K-`qZ#*00+VwB zZ}LqAXvC)z3-gnT+HbQC&AYn>uL}taQ*(2xqu^y~sOPHXfbQ(P_H2{wgu{5Dh8l2F z&OGpxB%1;>7DoX#nrQPbval=yD_YPfcxYu26PS8gsJ+7 zr4E+gx_Gr#0hR@r;}qgOO(&D|kOtU7Dhir7GP$y^T-UJ%mD$5 z+tJn0J5#6bJ8=kF?m$t42FpgP&qHWN7Fh2QnMbSc(1dGp7;o$@_Ez~b`CR3$sMjY2 zN}8I{y+34E#Ue7LbXCP7q%1h{zp12A5z}h~N-27OujO=P?|h@%N6)9lG+LlTsQM3t z6YdC`f+8^FbKwLcmh6)4a5TO_W{Z_mL{|0DMJK)E7Pv^!kt4*!F?acIn8E6#;549r z1_!h9>&@R%`jG@28I*#h+oq3D&2xuX$Q7#OK4+G16;}2X;Wf<~Pnd&US`ohTBC<9_ zA4~V-2VqCNFQ)&pb}cir9HBDJwCyE#dDh%=hF3M1j33WO*oc-{e!DwEUk>C<=dN&= z&(_guZ><~0^0J1;=+oJxyYtM1w-(u8AS)(H(U|P@cVFPNh(-*2O;v=9#g)VuFy`iT zfuk{SHj!6Z>}G^LqCd^H{zS*om@!GX3^WyNJ<;)CCv*4{upat7KG-W|DYJ 
zc)zDTvv^8-a)>cDO;wYsvYlSn3bYtbPw1R5n{MbkmmM;7($}Ly%q@MX-d8qGMSmfa-tGTH4*_D0%!{&J)4di<`V+vY5)Wu@tPOGd35Jf#tE_9mBKCG#dqe8EMD zg5&KucpS==_)Xfyn-ciJ<#j)j10jAe0(|S5;V=!WPYSZ#OOY| z{Hrt&+*I-Qe<#8Eanv|Be=@d4vg&$bD8a0i<3c?~8=uS{1Qr)i7crQ=d@b^)X zxPW<&m$H0|$67e<8)9l(@7dXB3e^A^0aubCZ7$!0VQdtO91^^en zq5(kt*W{HW0g6kJAl`ND)z&8a6ivbf!1(;rweCgY3eS7Zkko^`Ld!WtXhI4yLxn@} z6}z4#kJs3;xpV5Lw-UDkMIDZN^ov3AZsmb6Djp1$v$RkxyLZlZ$gD~kKMuw42lPz`8~6B&jf` z&&;w=kZjTn!POsGE*X+cO-4XF69~_IC@JMI$n|~g0d7xIKq`RPbSU9K?HDt+`&Lgi zn(_kNzG~&@II-Yq3C)d9w%s5Qz1Uk)^GA4OyaVo)0oFF8-w>t}@GbNozQ@0Y!; zMZxDb0o#&QfnS{bz!8rildscq1Z-ONK#=+RcBM%%fU6aaZq;b8NuRwo*h+#_Pb_~h zUk_kzI4`;qlZqOi2~j~+REf%!#W$7~#4M#r`m>Ha)$P{04ILyCq}CWpebXkWZp1M0 zSC0RdPXf5$(+#d6_kxVP60z=28g0l$&gJLwr>@|`g0cPM9)lk`@nJ%(*t&yNf>%K-DtlA+{Pu{0WBQpt4cwOTzGP+F!&0qC6|& z(Y77~TgOqhN-tt;9c;eGR}~{352~zwI}cd;xX!x=$?;mndU(`gZu;>-GO;B3C_+V# z)KuOu)25oKEX%6CPOM=wt{Q$H$d&l61KcgU?_lpaL2pj^~!_Rl3QO%PG~wHwE$s0FlYs`2zc=ui!?iB5-g6Hi@3gVgLik zz-xm8k7cI?iCh(W=zpF8ao&6h(YBJ;4j>Bh00jVhj9Ja;ekE9IZ8*w#@RDSzGw+3# z3fbZ;DS$@NIbnpUEcv)y}TWDdK8grxmZ!^@CaxoZ7oPM zK@vnE;><9I(Kx?gLUOVok-SnSt0$TytN)TPDb?Z4#!5`t(|C+Lpklg-v&ws}U2(#C zLo6GPBBHi+>Hn#<`IZ3pUI(4jV72h8;qFHDfov^S^SQpEI_zeWJ&RT?^sP;XFF))} zc5rO~LH|Ru3pZlKB?R;LH8|9Pf`xWEsGxZHRnm;nSm3Go#6x(+8V%l*`u`}!T0~-U zHEG`!1>@!zFL3_Jry=lSdZm?YYN* zP!bDf*R(|txAk7rIo_KJDdQgxJJzQXF4h9fPr5E7Iq_OJn86)SKTmd}0wZ56_~uqa zi`=sRnFgR$MAjEc;ZibgldS})8_e*2e+Q}`W6!&503ABvtp?(8n}Al_k~zF71O^Y@ zYJCICS%~8pU3CUf3NP`;-fPsI49z#(i$s7~rQvF}h0!oR&muqo91e`&U{^Er<@?)$S4O9ez;BEt13yfJ?J9B<>L z+4RMqC0N($QnjH+6I>2Ml#bDp@-^A%YUHfo(GjE+8&lKQw0|Thp3f}4ps;4B;F?6) zg_~gzOKztmk@`m3D>`&0HKIO1sIx0lswe(?eq?^+{FvKWXS=kboJ8ET=(O*eVHBI13x;PAuMxO(aSt#MnsgJG^9TFv~2F}(kv%8|E-gwc3ogHS{7=s zSx;2%#cx{2Pev4_gn_Y|Ui!T~2hHC3)+@izX3xDJm+VJ+)L%_^B3DHD`}>zlN+I;d z?PfE|VQpzR%F)LZ(0y$)Ct&o)99}V~LnJuWcygsrqoxsuF_;LRKGJ5!wrSz0DsE2R z=YS5L+-D~Ter;#it7fSKemd*-O1cO;b;J0Q)-ch~&{iuCE}go`!)ix4f7|yMC=YIg zMbg^dMNep=4pyRC=jp4quPUi~VEiufd^F>Dg91C~E0bQ-YIGp*)(7v)M5CDGC9^7l 
zLlzz`NWJMI=O6JgEbgfG1^cXGbzt&_#g>ADmpCturu4hfbx8q%DB9Ve!Im~Y zPkcO0CzK}FD`$1v(+Qn<=rgvs$W(j7p>H~xTS{pkjrQ-+gDfZN%P9( zqzwjByv3VrI6O`hg@x7E6ZSti4qo@{Ys~Fy&(}N0F=&6_OcW6i5RfUQlo1|y*lWjj z+PYY2Qu!-lfKAZpd2s$DV%gl+k%SNBV-BH4-cIuY$gKhX{x(y2GkJ&SP3LJ_zqd>7 zE)5SYCl-P)kFkQQ=L)KRwx+d(FH|ubPFpLTns84PW3nyS@PxkN@#RvSD^Mp);k=mN zUkC`It923}uxKcrJbJy!pzQn4C5aTNcSym~cJ>ch(;nUi6cCEsX)C9D>9t{yT6d)wgDl)3VD57fHIYqws+1|I;nN#18Js)2SMP;X{HcYg{;o4Wj>ojn<`N?| zIxXW^+Q<}j9LaN~Bz1A~%Dz}R`wcd?(T}kf5VNwb4idE>_Mk_Ww{8iF`)-|#yaLS6 zmF#NTF|0adg4us`j=76FW+hTv6SrowsyzGoT*ypGrVbi-f?>yv$7WXS@??pwQ`zb% z+5`cg2Xb2*Q??`*%1H4;SRCu_+az{t_-j<;4sOkqDdg?olIz$PZH@9+*K!&=(x=7r zqczsxamx8xrujFwq{Cnc2Td z$XDyC->t<+{5bW|_q&#mj6zG?`5Pjl9#Y}3R~?qAxk?_X%!YIWR!IS<_v;Tb< zmn);komPfZbq|+ppQoHotg8Aq-JA~g>_2{n-`v~`!=zRJdVf8y^(SG!H6!fyLrr7; zHP5W=go#8P101@ua)??S|=sH-fm~ zW~G}=Y2nqD4UKZ~^CFAcbajUog!^FUW6VDw-F+QD5gx@R`MIa=Ai!R(q=tQgvz&E~ z(iZ*j$>_Oul+&(D=VCRR?c2d6zEP*C?3 z@kXq6NwRv^bCq4ewF_{OS@z3?ijD5;1!zt88+U%ir}X<2Do%-PjsDwB9_UvNyY1n` z!UvWUp6K*V({TaWFVN724aH2K4$IEOZyNj{%kk1LCnBmlrAwTz25F&s;dH(rb4agG zL3Q)OqCV{@?*fdq;f`s5dznW4D!RAWW?YhSpB#ZynI=a_-C&^XHTKQe$6W8hhTlGS z1=HEm5E<57tcHmEy&-WVqJff{c8&o)${NO!nHQk3Q&dwe2xhV%uiYuJO-y+Q$A~l>FoCa1e+h?$4@N76mf|m=|1m z-~h@;{eCHwl=1kMsXpbi#o&zG;Xdjt*HWn=-tVO;-wGTf6tDxUUIFnI?^qec4C1W- z&Ll)d`SplS7e#erEjn20 zw)XjZMjE}$?l&mlvh1|G1_Q!#$uJ;9j;9PbI7y!Gv&DNrk+T;k2pF9call)l2C zOeP$^OSVE!SjS6+h9*NddETvgb2OZ~oQ-GM(b>s%I9KQS`QBqa%qiE}{lob}T@fM3 zeEy58K)k~h%ao2NO1Y2cy{Qxvmd@LfW>ZIxXFV5ixlfk;4%Z#I?4mi@<29_}$wp+D zKm7ywJ=&78lzcoGBmVZD4?(G2(4fyf&f>|b~c_;(wZ!esq8n{N~7??BiD z<~?PadkGdhH+l3s5&A)#JM+Ps2&vvl4t1y>9e#Dr>mo&vv_Z$-U{q+)Q9ABZONS!} zqDMer+NQcd>@|<~Q;JA@3u>@C3Or=Yzn+8Zy3X z@eYf0O(i>egz~omN+Bv(S~yzt@0XqZ!RuCPGOBV<%H|#_m7w>J7fgcEg%mT;Q`_f= z93vWzh86p;n|$4kfC;RDC(n$6|ImGNhQPSJnUd5P$j^Xty{nDhr$XyyaW%0pS@(rx{aOz?K zyw^PqYfR#xkU^cO=yhrUO6uY&q7FjOKT-Lm2NxY2#2zl~lbW0oHX@kD!yY{~D~Y#{ zdNY5OOg`FY^~_FWv>;NT1RJ#(gwNi$ZvdRAQfkCs(IsX9B)!8Jz9V 
ztXQeN0d=13)IhH-hl1}@Wg=8T=uowcN&)9!{7X6P=rm)v-$0NXU!19cmZPO2*y>%`e5&Hm{ic1_E7=WWXgj*)ByN{odS05FTe{QV2E4L&=TMZOkBSH&S zkXLyAH06?J8}O$1Q?5ejh%efedloCxOVf}eq(iGUkGlVTYPlt>-ukjpC@i9h`D!)S zUI1SV<5b8PiI$T8*}BwUB8&Yo!8%T{W`G7DJ``$c%2+jXE- zhQIns-fKlunO0Oln8o6=VY)4BYrpLezBvL11c`5$yGZAX1Y7e)`{&7xc0_%;hD)%n z6NzW|zEd%=n+DkM-huyopb`;I$zyJBW!aJ^Jd-KDm@;&SE{_&fIn`9S;XkZ@Z^>c; zHJCkB3Zz0UNslP!tRm^Js;o+MKy3hJh_^_wSW!2?DI#x1reZAq3I|r!i2oG-3n@wq zlMw0+hH(i;BQZEE32kFjqFq4kzj`glTQhO#Gv&KFDJE_>wD(#;X_omIWK7RsPMx7p zRIFL%G?51#w@zGeSXTSPQqw3aY$UR+&6U z%qyaqaS{i8Jaj>~6pq^cb?@V!;IO4&X8frD3O({IJW z`lq@Sb_BH4_M{@@^5I4?*p6JCSvnB?gA1a<(3BasT! z`G<8msa?KUnr+f1+YGu_-PY}w>z;6fMW!Eai&DBjA(i<}S$IvLW821R9=anKo1n=! zh>?vNh?^VE_J1}|dsUwTV|$7#Qcv>(#Iteg%z7x$l|&<46--QP2}wvXaGz5;HED81 z++!Uo8#YN&gV(kc<92Cu-$B6>A^rQtl|!vZjEVmhz|-2Ltk~}d>GX>MTK6xiJl8St zxjc}mn@$N}&9O}$lIQO>jurlNN`nRU*hi!2|0+`Uha3Eqov6dkfkT77pGm3Goe2k(hLJ?G zZmI`A*r1?i$|DvI{WA;{=x;}^Vn7w&vNn9)Hn_(EE(=Ul;Jv)_@!A8Jo=T_2`K)ha zBLR+u{+Wp0gJ^&w*}tFb7s-2F_il?t^*;vm+}Ug`ixJ?e2liZEJ}=bmlqGCqpku*hy&vVLxA(XH%st~8sL~+X40?>75LkIr?O~yj)wt;YIHJiL6MYQFghUUid4az7`B$%^9;~Zn%m{fU@ z{1LD8_0R2qeNYw*5CiN@yuOi}4O1C%h($BAKPg7gkoRu9#Z%~?O#fjDfG{ZBP6DEr zPZnC2=u%>4uMH9%sCOAtk0buRWM;w%b&W5NJx}D^wks|F5Z<##47nfo87Lbv6ceKv zrkJ*+RNWR&nZrJ}Jr32;PDA47LWPH|C9Ss)g-TloGNYCq4K_EPrAe@|Q6Dtq(h;Am z;~ej@Uu{~i?$h8P(Z_qI+)w-La`7WQ_WUuQhi}N;g%^Z&H7a6&krZ%Smnox_f4~%gO&R(wATRhMfQ1{j2Pkh;V#@BTx=|BIIWaueXlu`>`P(6bFXtCvkU&WQaaO1 z8SOdL?LxzTN}-#3>x#M*RYumbuqo`Ufd~R`USq+#4Bl`l$upssR{`-_M#E)u@TfK- zF7ZJ&LJraXP*mm&B`dz!{!Wl{gW{kX{Vm5UK(ZhNmQnaZhQ|_N3`!O?ri8tY)u5HE zdmi``zn3gf4yBi@n&FUjlQW}O;78C$bw=nTgAhLXvK-TX`u0pm6rqa&9F1$EssqYi z-W_kB0TO;FbFqm&3onlo%=PMAuhv+Fl5>N7gV1-m>@cBP>ZqIjCHqfxApv4g0st*z zycg>GYpjS8${ls%pjpNm{T$;cBVolkm!s`L0=7qINE%Ogy zzD7V#(0&@G_L!wX3(>{4wBksP0xQvZK|KH6jRvHhQSPpMjlvb+JbNw>00nGxUhD>iDVD0I1s9W&`CAZ)sGPD6dI}U!yQh{nLl%Dgw_Pl`GVkO z=xnPC0M}+*H|Vf8IdXZ_MSvVZ>)H0Vn!oEVC5Gqs^5EEnVK5e@&>Y4UW|c+1`1k>Y z&Gf4sv8%Xcb2($lv=K5i_Q+j4eZX)ixQrMz2Us}pXoxTM^qcWsF+~1)Hw~(pqg>H% 
zNfNL>@XcnF%Pn`{|A-3wCFM)n^Kll`=K=wDtb|X;_PW!KGj|rtMxzD5-=(mm*oSA> z=Ije9*GlB@=wuLx5mE+$qiztzfirw1UOs&eqt(5AqP!}XeQ#$Zt@-#Oqa_mC*PVsk zv0~(DAQtR(H&hFUl>#=50{sQbP@x%xuCEZr3dq+#rw^lAESXp`ZCCrdMDao3??jX-qcAwKSuoEO8LdebDlgMtjYpu9hT zOic=1^^SqB=&rCoae@dOgfzsDngFLx6&z5*d3_WQYV1n|j8C0j#(x15EmtCQen1Ei zKcIFf8jD`%djksVPjTD6auVc_bOshy=MSRcardGGZ(bAH|zvj}@X6BHYN!`hbK-GnUi~M$;{dnU>Q3 zS?4TC4$KE&V?@EU7%OS=s6KXzL`Zk8u_U7{v%%1oaSS@-{K#6E*ygR6Q*P&woHoyk zmKHn@2pQsqp2ZkSR4w_O7X6ub-3zL&KS&RPT$eWSCQl61+52U*^Md-bM{l{yAimw& z&sH#&ehCv2GDCzeom6P{J>W6|LgI?tJrCq z@@WhuYH`*7#Vs;=k+*U)P*%h#o~7=KR1P5Il;V}JaK4Ta!8^1L+IGOp0t^N&G2Fq; zH}H1l_Re!YXew9f4^%-q>G>cMJ`O^5Thz@wc!vw?aq=C@aw+Z`q1W;fr=#pxmL6LK z-*137nl0JIru(pB)}@yUpA(;hA(dk^v@TC+xyHAm`Z8wQ`ecJ~wq(m<^$M{0Pr!!f zKKNrYdwt7gJuHZ|+*72PaV7_Qj#HulJn9}_rl^VN6|>s81G@x3P-hW=^Am*-Xa6^R z{F%}~(El~LOPGK8e?n>Sf5yaU(%j)xs>`BO_2H-+$hp$b=<`?s2K`T)nx?-9FPdZT zaqC@~y2aWq6bg)+F#WJ+ja| z=cm7D)Hy6=b)K@%R4^>Du4AZwMnJ+`JZ01D0DPOiX{nUS&h8G;e7695ny{HT>=s$; zWe&y#!;J_JE0u>pH(R?1;Dp*g{CPtlU; zrS#w!ATRA#vyHyeTXKE?l_Io#rQ(@%CEvaw4zDBXWatqEJG>$lqkvoPO`vml_h5O( z3XM0lP@(htcTa2swfVU98lBL@)OB!$nXG;@fz6@Pk&UTOq5?3UqrVd2pZ>_Bob9|U zJ~?#Zx_X`-)ELZjTNk}RE}arQqo}wXF6G5vzKxnyR0DnOn-u8k z_Oy!px{luq3%R$%mewbuJF>Zhf4(KwH3L1gb+&g;`GOCU= zA68?fdi#0av9s16;Ch^&9E05|qMVQf4HJ5{h5#it3z@q)Pc8FDYCyHWOznvA`1b`-r>st`YDx#Erb zt^S0=3L6PY1AE$;p`Uh)uknBF2K;R^WddPs;}4vS{H(W7h!QO7O8|eYRU9dVlv`wA z8<8YBo`iFV487eTU@u#CTRnEn z6ru?adz0+5(fO2b36-zw7XC=z-wLlDiFPcPLI#tcy>FB9Z0^vyTrymSZWVIXPf~_>;AHyP@c}| zV%;_==u-RB7iq%7CeObR<|MHo-btM~(zp?3hq}jsL4D52Fg-iWL}T@ewUE4=5-dnR zMb5B)Or43sWcD`eM>pP$5^Rwm`6PfTO_GZ2EHcL=k;yaO%OIKPfP1$r%-4oQ zUm)kD`SpsNunS0uyc^d_ZQAMk6IR9fI}})RXDr*FrF8n`h$V4G-CB&Q1qZCk#+lrQ zU2iSj&?Yn-A7G~3Izu}8-r<(*@F+h7V44{C7Vq8k=UkLtcZ-?Zr;t>?T2;l#{;s9i zLsm=zxtt-wg(0aFT>j2n4f;b?7+ff5ZmgB5cs;&HPp5L#IAt8+ zNk6(JL=N-dVISf3M{t58Bwl}ny5n#pc@^yz$+I9TDx$twZ8ZuW_-h?w);yM z>{A&B!>X;^!B7GxcT*u^RZ%TcPLhonS)v3;tVrFG{^!eTxT2?@;-Ti76yMyJL`&7& zv2e5b1HZ3(b>mukHr@4o)%Hmbw?XgDB;)zwgEJ$rLU?t%}lB)av_27N?Ca)aAi_ 
z2dJep=J7P@A@EKBGXUL1WT!{bE)9Sr;aK2+ileB7EH~|w=}Y|H5hEm$E*4iAUvUS95Vs6Fkznu`9AYiHux;?oWI@s(TI_w zN>hD3kbqnTM2J!5ftiOMO@_#&Sq-xO zByhYI(Gc4jo|amDIJ8(vRi}93Km--gngYQ3{`y7|wc^6k(62Jd7?r9%ZPJ{)wU}68 z6et#f!hG4ZO9!uh=0s(>x~3Kb3wRf4g=QZ-BDz`p@7id zE2Cr1c3D-qIC*2D3V#mmGS3sE`J&xk7Q+2IE~>jMWI-RT)(|%NmUX=5>HDM0iyY1E=O+)%T)Wxx(ycNCfIZV4aP!powv3G^WivNVZP6v0yEZ0#?maJ8Vif?KZ-yDF;~ z90$1F7QN_oMz`s82a;;ot(P7;?B8L-tZv$^Va;C=OOVKr#b3bldmmh%;CwR_*b#@W zvExo2^_q!g0Eu1zv=5o)wLNizw)EpDsscXaypGz6UX24+BBEIEvvWVi;}tZ@l)A^i|7tJ{Xu50e-BBD!j;G zyiRA^HctEIUOXaS5vUlT0{1C1&=O%&x`n=cqR%8s9@HOo*2R4}>HMvg)sTV0XNhB6 zr^OqU`)g28jLt}=DCmGQHOkuHaHrhE!9ax#dhB^2r>-s^oGT~Yf}+)gLy+v*%6Df? z(nbs5I54xS`I8vFx4wuyVC$fygF&YrjTvC_V|R$tzh4y8YDI6lBel36pk)U)*D88! zbM>5_&)X8--JSFPR6j+jzfxu1tG=a6@~C|PW3bI8a8xMjv_&zsKBk>m-Wb-c)*YWi z>(Z?JiHyo!jeG8KFQslb+);g$KaqLM@pL#cwk8iG4^TB9hUlrkh@?l60F2m-)h(et z@l0qe)98=_c6^=Y7Y%>m^m%7Bss6e|F~Y2W}Y$PwuNn6Bi@y_7 z{xBFA0dC0%;|neT$hSYwV`Sz6cf8jB7$ywCux#*8d#OB6P1u8sxryNk@HaBD;&ug)d3~ z#;#OO0_TnEGIeCK6QJ%dA}AZfbJaYwy2OA80IHQo%V|h8`aqvg<{9M;4_Rpt1ivs` z+E;9e4kM3l(>`VB^(ywWUym^|54ZOoWH>nV0#`FQdLD!HN$Dof`{`)_>eabE8@!Tmpj;F~945=rcc0HeIRj^SW`fKOO`U6&svij!l@YS5n$62?1?0JHiR zzgGUwEDbV(M)g|V(A;sOjXRi%h-0&SMJ5sV&FVU0Zjl{VsKHP6OAzAhcTe_o%Z?Rx z;1Hv!TVF;yEe%%~xgFYbA5M<4pC{e;Z_Cjl8%cXQA`cCjAD0tSlE25EB>yCSZ+hFda zj{YSJ?_VCCDl{vF`%LG`j+#s=>0yvQ?z30i-_;!f8;7=OR5~%-s%|XBZB42vGTm|7 z;%~9{Lau}a10!o90o$At_1(dmDbSA?PW!h_OLgZrut+jV`0q`!-%Orp-AH$}*8ig6 z&veW{a1e#Y^kA30!UMykk>dip$P_LlUOW+q7vPG(vIb%NbPF=b<)Wrr~zWKEAH(yd%GhpD0fKyI- zexfkIJA!%)0(o|n`1PNx+i@n#NU0`ERQi5s?Qd>_=Y1~>Q`GB!~w^Ic}4*_OVY z??f4=hV|#l|IBL!S4-%<5&a*Oy>(Po-xoc45kwGFy30#A7w|kxsSoX>6kKj?F5}H$QGO)p`*;JJ`>^Odd83=(${o0|Gd_;a7ry za()1QGN&a%958vgikYU>M8R1QzZRr_weism`;WN`I-AkYQ7y(oNRV2c`Dp!-Uv=xg zi#@T_^zRwY{Kuc2FS{RHk(NWytUtR7+M1v5#Xukn!Tx=wZWL$!-v}XU9|J{EQJX-b zlp3n}Uux{|2)zm#^|iH(+w~e)%Er{C{ixYdt5%a|!%oY$ar8ZKq%GCn9<5A6UQEc(7sFs#dnY74YnQ z?mG!Cof1sw=@>VV8+0o*<=t5ah5m#y`OyF zkM?cG8Nvz3?1n!Yjd}a~OQ2rA1I|J|$d`Z-%K~a<>hv4B8?Rq-R$q&N9pndlPGRv& 
zZ&Uco9If|NfPpe_5Lzl^9D*NRW~#Pp>Qy zc<*#`jrIQ zZZ5JOCH#I3+))CfpF^(q4c`OL+^{itH1R|bq<_0qBTzl%M66qeCK#9i;vtdUMs$0>zeAV zX}760U(V`Jc0DTs%4c$=83~)wcOq~dPko+JSFC96c;!&qnl5z#WmI4PDPPL@`t_+$ z!t{XlPFFPl6{q_YW}c~re`|_a^hGHTv^ju~IMzf_9z;=MxDF`~!!Z$yr2XGh6tl{7 zB~wE{?}Cs}Zw4ebVnR%cuJ_;1yPKTu?{4dgo8J7qZk-s52F1vV$-QHz0VqcJ^7wib z(h;7O`a$n07NrzHd;~XecY8fHbMk=g#sYhUxefxpq*Z{It5P?ovUA-gmaUXG&28A< zQsB`@xb|XceoghVZtQ%q^_)uO#Veef^sV#blS{>fIDQ{La?Qk|bsk}1N7xl2>` z6`6q~O&NWy>+*pj6o<9nnya36iputJ3UR+?pL7RGyG2x19~V1x$m^=%&6EC zbLt+S|9JrdGATbz8^b_F_&2%yzQWkM$uViiHy4yaAjn=^n|fLTQiR2bVqn32%LtdP zyitwuckWxE6DGrqM$dn>CI!1^p_}ZBy-P4wOO=S5SrDV}GK*s)erYbnWRM)?%6T^#2py zoaZb#Bv)p#2iqF{8F0eAqHyNhjSMvVxO5Q*j|6b3Injar$;tYm<7_~*9o+lyYSR(0 zg0|MPOrV+tp)DyD1gm*-Y_>P&U3lG%%dwtvxLNS%aAT#lt)|r* zQmFfwOS6O95XfZ*P7&$WFa6j{$mc97Q?O1-?$gEoj~7R5&#TvaDHt2m1*RNf#=`pW zHmfaZV_NgZ|HEi^HafC@Y|Jw@q5 zo)l(MA|)CKDi}x_rpoezr(mID8@gud^J=&|S)a3VRAJ_&&rB{OKHnn5yxjzBeKvoO zLFgUASR8&qap+;KnX4Cni%6y6V0$;XEq9>{U!Ig7+?)FHmNl(u7&ThFFj#bg*9})a zc~!X_TE1k;{3++IznA;x_$nOo)V%cNQ&xhl?}IGvTy?U_(cLw5NYa%sM=mJGI~gXR zGjN>}6_FJKL$1o&quZ?UGH^zN;)(IQum8u2NBvGbjt24x&}GA>xeF6a!)wKrN?KDL z>AszT;({sf6QKGN?^L9K@q|aW_S-&!sMXQ}ygPU_r~_7-wrXH`H09r>hn%@)C3LyAm!Vsn*y*Ch9KGwE~5ZCX>F0z9xxFthz zMBD$GDCVLkp6EC*kW1;O0j08a48h4l--eW*yZNOyBf<*3M=gv&I97+p zkRbhk7n=JFs(j_;dL-xyJ<#5v!ovLl@@RHpahqCW1i0NuDXSVR9CQ>cj8mDh`Jbs% zm+uZ9o`b%Rzw5O4S5F{tTAd#;I$f)Ez;H3moF0=IY1MnNlwu8GAf4?Pn-+Xy-7Bylt1a(B{{sR9+1(^(iSUD_h&NdUmD4V`JnxdNaB%6C zgz7bne*o<|oNhcfrq`}9%ATf@>W9YfqFm{7p9i0JQ4yA&_zuM;8XPpNlV_cdgA)z< z#qzo?s@}$MF4O00i0&98McYoGR9Rp=gywPe{Cvj{NW3@q%GjROn_Tn-o5IQlFBNkN z+z&g*mB9t2tzY@|Lg9H(oza{L$^fkGvz|n6g~W39J0_{j7##~XLoO$<&9OJ1a96LR z0?5W@r*0^0j8MxOg?&0?oZ|uJxAH6Uv*jWY^@gg+)PJh$`Zlzgmm}QUKUpTTUiDj{J`+l+CNuwsiZ)ILE^dMTCQ2sKII@8Q#0RMq!jid<23NCJi7!*4ugYNC*~lav zgP7KCj<^}>bQ_-%Go`;k_-R96?!n-60__Azu8B;w>CdNww#Y*Tx{6R7!josr;ud2_ zuUWhi6>_eq)5jrl;D4RRqF<4@fG`14wX%wJA@`kL0#65sR8UKfqmfNV=%s{^Ezn;F zF4zzqUyey~ZRObS89->@74j$@7GBCTT7Jj_!ZtG!lR*T97<^Hhlr|GnxAM4Z(1z(# 
zH?w@aO7_%6yTiaHkwXT+4fDY$Hn=Qf zZ2C(dIu#{po0Lk>k(3=+BKDg1JHTe1;O0OGiwq_q{yG0s$YT66_{pEtT+FEy%IpSATXDH5diW4RY09-$0$L@ee19M|q`;-CI%Z~;zb z8MEF3y^1$Us9&E$vyA|J0ml>$ggasmim#?W?F{}`3O zF`{~hM81~-95gOpgIy$|L;epiYmw`zVYjkapOx2m-P(C;(pA zO~V1w*l}FUiFk;Mc$m;{X{!=y4#me0;koF9!T6Te{mQaQ6rf9qX0d-;fi zh%&&k@Gg=kJm%C-aQtlOy}<$6CziGf0Me}lMPZlm%pWPxcz{hD#h{t1T?B2G<9#1J zb19%h^*!SOGf+gNE|c{d`9U8(qkN9~OVw=aT5jH6ZXHI}0TU1+2Cg&LJrIv&7SUM7 zsYhpv2&1$cpWafcjRV5Yrwvg;+B7omNnmXMdId5@Ut9$EIMWZsAZNkf&$%`tKbc&y zs6&ZpoA5ljM#U4_e-=yWLTQsb@`NHPfa72{xz0%T+N`2M6tE?!}48EE04h$Nz80MFn=^{AhrnCT_LWoukh z`n_1BpPo6mQbr4y)(^$N#gFA0j4;G6ThA(6xaTNANScThmSO_IOZ@>TfPqgfPD6R)K%Pq7*?1q}F|fh}VGKY3Z4LL{s{r$_ z+OGk}g7DLr9E>3Y`VIUSbAuL|9jQRO4I_F~6x*-;?3cliho;2hpv>bhKpQ0;o08(L zm)f@9>5dCOc_d3azGz^LJJop|#}z}2@GF56-fl)bo_^pa?em9K0{U#$`0`(0A;}8H!zc7|BF6eo|x0=+v%Ke8BF%>v#y5?E?{AZnP0V0 zkwdfRR+&5=pgkl8X^-&{a<0KbeN!3Dt~k2*J;#u`IH3%src}h9CN@DCk8;3TNT==}%gA871R&>D?wq^yOAHZ31$+I3_Py`(Ia&oS|j< zGqp3YKgNhaX46uv$2Zp0i!fF*$iD`NZv{=&brHap8znsDos4H74I%!#9@Z*RD2(A8 zrZxQ^!LA`{!qi>@9N`V5wFUk=cqM77C;OWT4jvhLptn<@#`wA4&}Vx9$SY=)a!_+& zaCg_#H>e8SC5X-;>{vdqGjRkdzwC5^Z9)zhZ{x-T{nVk$Qf_2blRHlF^+axS#{GHyu3)2GjTiXUQ|L8kDm#dA{I2{8Obv zFV&ta#g!FTe~n8X#SuIf+>@|xs4@7}+0yVsy>^*@ha`anY&cg%68MVnIILuWOd*|y8IIeveEoQQiUQO=EoUP7 z>OJAGzCwWEY*><$&U$|?rx_+#gumU}cQITEFtsiIsa%Rln{uP_6FQidF^C3CWoXgr z2IvCBkra2nrRQ-3#I`4Mf&VT`#=7Qj`!=x_?I0sDXy=gwrg8B}PY!s)3fQw?4Ny2d zKHtCDnN{chUYR_H-Pqnq*pAp>zT)>48Y+EgeC3;w-t~GHocaa%w7*Tgg1%*NVd7x_ zg>L^U$Ui--7ThOTq^}&n>%~YI(@aSn2>li*+CreecJi}zOrxIxkAj1`hbPDZ(Kz7*0o4=@@=6l@zG-L_<+QvS;<;anCo{lqoR_LerNzfsL9Od* zYywOW0@%MKY-~qzX$RPxA&Tang9Q~W3GiL-5$Aoiu8|J36Np}v8-Grc#tCi)>C2km zvnj-HK$lt4? 
zZ5x9i1{GpIVi&z9CUYl6taY`V?l$mJfs!6%EYN5bPZz^Uu@uh)?34)1@C}UK6`-4n zt^=^`Z{T4t+oe@N1yI4Q+w829yWWLeh-rdDg<}x(s%TfrtXF)vR}s+HWfleB-cxT- z%G@!ugN8M{P2dpgT)bZhODt&#)-`?ALteWWGNB4y^;}kWqoBode}B+bzU}gRmw%}X z-E4ATtF9L~D>|&AD3x*^vQ|YQx?TC5D6u~fJ4?!E=Xq5_xkl;YUjx}vbeVU|*IS;q z>NLOO`P$`W8+3aEvN9@Jc++Vs1W1~>xnPI}Hti_D{TP&&gM`*Jb~2IVQu)o5 zeUf3kuX504U%aqEkFxHV$k_4^j)pNzj9~5K4M;s+%wLMJJPakGdbvBRP-dA)Z0<7Z>JpZ*OTVNf>9V-0P+%p3a zH*O}8ocW_P3lD8o8&5{Mz-A=fiOgijF{p)$c7gw1wp!9E1ADZ!{HrM=JN_LRZDu5Z zUdjpv;EvTD;VevVe|olqnnIj=6XEBG;OpP{Gwsk@gEot8N$`B*nROp`#{+U~^?Wei zjmR_56CVgmC=eGxTg$1^Ul$uW#`Ml_mRDl{mE5)3AJ6G~mX_Qb@5iMyH51GugYT~* zJbUvpzsd`kt%i9H-}!Zb`^X$l9g0Nb(bjy^BP_DlfVUE&E8o|DkxnvSvBFDCXj;Cxna_1zBqv(yvAGER-^ITO5WROe%I$7{soFBT{@t# z3YFb7q8S*&7c7_l+nI7FRZnW;lN%BwafH2Zho}4FIyx z{^2)h;slLNQBFs>@^!~T=h>&b1UvJD2G!?YqHVf^Wj(~v+^z$R1%K93osc0)mqtE{ ztkxPQFDjEMVcJmH!8kQI)?zRmWd}I8A&`jST>T)`A7Z7#Pq7+#-sA%y0t$L2+G6hU zP(1EysYhzIf_9+Q7=#Z(tE0J?6HB*z(wBHK$p?Bh4II#@uXSbMpdwtd@s9y)kr(R; zvc&>gf7YYlNFNBCx(O<~aI$Z!uNY9iVDy=H ziEh8H7R4R`9Wdi&*d2(=0tPir!RaC?C)s264Ox|{6lk`x^Q};fuhFy3rEnoBQAYHl0RGn8Ze4u%%K86LxY)g!=OMoY z)>H%b)4?z63+`n6{!ZpY4i-R+YP0m|m;RU4ORu zu;&Y=3;ex1d%;I`&vLgmq)J30@$gknqw5TPw{G5@Xj3{Izq< z;!?TUh9yqDEbESQ$IZ?weX;TF502OB4;pPVx>N(Ms3C1i+e%Ly*3;b z#s4jIts%IlN4G2Z^!l$+1#JM?@D4$QeE)zVQ_2 zk+rW67lgQI(%fg~)MK_QLf+-G^5I&^f$1IOG=Nu28DZncjYF5I-Hi2GANhpVS(Vnc?F(Og!qx~^;El0DYKMsaa5e_Z>#cD3eub`ODcI*Z?uNm~@ zsw0~R#m7VYad2?}d0Fu-1{JtX)JsOlUxL4C)3bwac8 zqQ)Z6IShWXKrC!~%d3;ky315%*Qpd0gqs=+mv{$UmhIEb&L|K^zf0PSThW`lR=#CI zF@F?w5M~RP zV^-#ah3mj>&;3gzFgXAP!eHWqj_~jyKP&5LUTnoBG=9TbcC*Dx?8FOdXzhzjNGwRg z<09B-+_PbNO&XG**I(HWbpC(q&n;Qi`1W(aMe{$~G^BsG0-VY0RY1L29Dg2x?f)KA zkcgIHNrFMrlyEHLHo8eG`|w0fBAiP8wRM|oV2D)`@Nv&Sl9KYN3cL?6(l!YPrU#zo zMfdFh42Zy+4Xmk~)v!9BOY4j2fxlkBH3oEVxOxoZqv+h6O~^Ru3ATtW7KIYvQHAy* z84YmVq1fVeh_Nk-s@^!9I<9qJsky$;P#XuULo8Yfva6Qi%9;*|U!(b*aWf{-f^fb7 z5NQ#)af-Pm*a#3m0T9x8O0QY@TD~E@4;BZqA%De4GJJDFa?$($InL+*=@$t#1QewV 
z;F~~yxJ;TMHs}j~qndlCYV|-;Nf2%My)zHgU6Jp8ia*g!$8&v-;=(jo_5hRq`D~4_|<)eP;cP}LuYC=jg>uYhq=@o1KY%r z7cZJdL4(dH7n**j6YiEI>tfhayEG_%6x!6AE}2kKan;!_K^x!9#Q^Hx^W{9+i>}6J zl9E}Yw$pxu4ZaJy|FmY{xas_Wjyt}f12k4X>>Ktqj#IBa*%VMob0q+{39@o;vxyO5 zivUnO#tP%P9ITzjs*+|$VDRe0JrqO(ch@P%+?H{b1mBlg1w$VF0!KZ&I-g@nIsphz zv{h*Y5t>UvU!s0_l_{R+Hv9& zB&ua0^`VEbQv0#>VILgwFL5JWBH2cXcC!Bl8nG8yM)H2jNeZqoOFFP83Vwg&~OsgfH?pfq=SI6vd;OCTubvHs6WLC zqRs^tU)_`D(QW5O;U1!K@!NX_T|zCH9HnY+u6Z1=Fgw5uUcxdSSyMO(@;{;p1$62g zA-ua*Q}MHs=NO}*{8P&o7b;tzx+^+-BSQq6t$addkv28|Gs3zAL)%waw~~8Z)r!A- zvkPH!{MGUZ40w2errXvx4& zfIu@l{(RzehMoFaDkyT5Qs;pl?(+u;<70aVM@h@Jv4>?jEf}Dp23ky?vmVi53t}n3 zW$QNrv?5N?QgTfOnT6J#(JU9Z(VqI){NLv2>BFvAMGbp-t({^ zLGr=)AuzM2?G9OzGbY}auT$`S=2r)E)8dHRBDS*vpU9a{fw$W1EkHo_XsIMLy_&iN zAmr81^-vlwvZRn`_gQ|x%nm$rlx7fiTcrCB9mpT|0_K2N0X)+}Tzd;bb?#FKY$xJ!``p;P3DRdBGN`H(3BnyFW*CDuc;7WPtnK0x9ZPOck*!M+D zONL6gkABW;kf#gk0bliLE7x;0s%cPTTmrbWL%q^P3X(V@kLdja2SCr!(pCU82L?$7 zciR7Y6Sp@?lVh8J@1JKgRb!e&Oxw;0!cb?Pdxz<&o>R0g+Jm^XFhoPzJ8s9Lz^M&7> zH^pS$W)plP^En;{{!N6p;CQV8i-<%@veWs{<2S+ImJmYY$kdiY)}drkWXe0$X8}hO zl#J&Og9ctXx81iaJ1AucV;e5aM$%^nNJJF86kI(;k(uA59=AoJR%EvSo-eHbdm+@FdNjSF7w}8c9V-!B>Gh^S*wXrmdGRX0|YIE5C-}b}Uoccs+0G1-rD`*cQqg{DfayqZl1p`RT{s^zpmDTdLS$ zNaky>5ZDOKch@wS#c~C|vmbX+Bzei!Dz<#?61O&+HYc_}9YOdp7feb?9dvJ5zZ8wp zDG)ceW+Nf2o$aL=!42qs$Ul}OZ{$LnqZlggoYZ5OUhHPas-ah8lt>8;_iw4j(pr8Y zp{-3vz{BPgBB&j*iC_PmoZokw);CpJvbo=y3|d?(pc$BHhRbv`)1{pTEk5aA;VKy; zIC?=HOO!K7s|X*1QtZrDD?0c}OW|#gybvPgc2189qquu`w#%)>h(Q|iEB(WL%>mCQ;USNE0xu}+23;Ei6eV#2{0*`oj|{4_TLb> z1kR0N*WYC?OVeU;36XXut%uGqoUEyTOwO@>PjxXeUSy{IiG}TL@!MC=ffRS>>~*lx zn;z!%$Fx~~CSO&MOUCHBDWE9>ni{1#pCY3h{(R>@`cw{Y$uj$qdQD3X_m4>Mr-44M zIVD}_y*R{GJDp=zi&0wvA>Zrp?|Q{wW?#m!CzSrM>WSXvO4(0l8$a4F>@2Bu%JuQr zn(3g~YD`}lAGo-Q{;YS%^H+&XOtAlh%G}((x{6nNsYrfzXOB_iycnAJmf6Loq1kVv z!o1UUm64M?~ZuqR^m+M3aQf7-5oPKyM*Gl4Ue-!$AZ7j%~@Vv-GMA2_4DUr z0r&X2mfhihXRc0lFKl`}SOTx#8kt<+<~<8$Si5Y1BBsDdNE$Wiw|1Ezbvc~e7|&16 z?bbYbZ>NukE)}Op)$qAdRp%Mc>GS7LnFqZZzBuyE8k)oT6n9V4*?OC07Y7r!q*l4T 
zd1K9%KG>XKO`1zdCnRn0S$(on=ug|j@wyQ5Q1sXx7vKfbMYJ3DAT3ud1^4r}^G_J-Fp$D>EEP*a=j_nha? zx6sBe_mbI{B_^7Vn!@_|=BEm!#lEsvfeYQAEOgIRAZ@^#Uil}h9)SuWRIIPbdZNk0 zoLhc9g~+Ln1_ca$8Xw(TY^4+t5$PMMXziPz&(CRIao@bFISaJx_<&BtWA|}9OdM}Q z3YG3Mzpj{St`W@paFwWjV8`Gb$0k=lZnC&}^dP^EgXZjx#a@+s3OMd+L5r+KeO!1stpn?)Kl!=cQj)r+2iaUE0wiz2}qP#6`eo!U66xx@V)jnIn zw(Xd(*owv8REOu=GC#k67xGLb6wGH_`OY6eKX3D0FZLtH_>$`uHhP8LQ-tevW4r1( zrz>8mzYgRq!TMa!|w3j z<_(vOs=^0BZTuer>tdhZM~gZ2F1&t}`};BT?r3~$>Yj`5UCs+N`_Rst?;`O1j?}8d z?(ni;O0{I^t!<837c%6DXeRK4<>Eg&8%uw%~d-3c~OFhQH(6}@t z=%#3b!`}xxLr*LC-}o0<0`t+@dA;j}Ef)$dX@TqR0?gX;2>kmIIA>)X1MX;7F(Eh$ z%9>WGN5#V3+7BskU8)7291XZ7=St$O`8*&qjN)nUjx?7fg~P>GGa&?zvzh6T&Tt;MX)T^c-YV=FnWS zuwOqTx-`R((HvW~KQGnUIT4h~kk-7OrB%Ljny*7HNE1cL*SW4a9@v3%84u5Eupfe? zT2~DfDrP5>KH3-l>_TQaQMQ+UF*u95GjnO`S7j;0_j&;)ziImbG9qHj75a=a6x_~Z z$i+(d-{d$?MmP2rl+0?Lm4U&CAL9+g!>K!f(($)vTqrD)mXLt5Bq7%wg_l%Xbh|m; zUNJ-3j%rPNpG+@ySP>t|l`eY(w)%GmKB@_IDeqvWnB{b@_AFnff48g@nt)>^U#p@;^e^q%8LQbwKc|<=gv7KoL zO2CoQA)gW)o^$XsUh!}}@w>)l|~;-7aIY+ao(37r-%{&lBx7i+cF*3!P5=jJp4I4At_8N-r1%W)EinH7x2s#UWE(y0DNF^ zYFtXs80WSFZz3T&u?eeDUM0aD2eCy1PZ!_CDHxXhxzK@e?Wa_uUPk;!{&3x-6z^@v zNaCDsxPY~EvFSh0zB^l-XE+Jpr`mE{ zpEYuNYP~oyZ0Rb`@RsnI3yl$#n%%TKoXJt-c(Y;VS6!*@5cN`OOxEa2tZGa6Q6vJYtP|zfJH+? zv)mSZJMRDJ=}s>qOYk`Gob9cwqd$r4wtC-fC26An)S&qHN71)Nt^2IuD(&IiQa98f zt04*eN@5fd8-9*T4gHqvC?}`RDB8{-R**CEoGFi}Ci#c&n%?ME0<-VwBXH{?! 
zf``sJ`T8aRnq9ck^d}O&xysxfZ*Agp>D_d4bj-H@yVjYqjE(K&;yAO~v4fs9b#CF_ zW0VHr1`E)*I~jsvt{7Uf0x_P`zflf?)-5Uw`^)eMXIn z{U!~A>N>376gi4A5(8+p@xCHgCIET#Xb)m!dvPD1Wj?n(dRTYaLyQ2Mwt>xP1SJpq z`=ckn?yx59pmXU&;G?5clke%q$OUz~VfDsPN}6Uq0jI`nx9UNK?y?Z*XFCIf)!Zm$ z3MYR=syyu&>;B|L)Hzzx8tQQ3V#IasSIE;jc4I}Vp>bQ@wWg9AofX$h6R9P}+Md=TYx5+pWAM+F~XJJ-!P?Zj}Injg7`4#zgcBqzVI6(ehe?cGK$`nGgRgeOQ*oiuIGJd zmgZ%An;coE+#J2Ck_@2t6}8^F623Tly1uHUdnTxp^AbMEKDa|S*_p_s%EjHh0JhQpML;)#~XtN5g00goMqy}Df|8oKBX zR{r6xzF7FoL2R?~#Q)Fm+u>1)=L^b5x9?IYngzv8Jt${Ze_F71$K>Plqore72mvI& zEjg*Q6;Yq7`h4{f>w^_>xN+J?;1LP;v)F5Jb!8?QNHrRvPi__MbC-ZVCiOm|Q0L&( z4`mp!e@l|PUJ&w@QtCluE8%6=Xg{uAc**13m!?=>lbWz&aQlV!K>Yj<=Vn#s_dRda zzWMD<31c{7y*;(qc(ZQtc0SDOY?JU{KDP7X^tkl_gbWOh22e%{Wbnf&Qf3b80WIOX zuFi|SMrugVyl=zdjdMo(S8k`?SLH2H=AEHgTOkE`ubnS8BpqiCkSKiLpl4i89$#GB zvVz2+trr~+lTcOSAkf9HOLN4lz5M*A1TR*Pr5Ms@NvM7s18oeaHmlaiKh7O zpA?>+)_BHadv|(05B56dk_o`tAr!wku)7&SW%r%Q0i@NAfjO6bjWz&)06GP=h7ihC zOOoO7sVn*0bd8ciP{|b0pJ1JC!2>+XYnLFkenr1Km9 z*1vm;C}Mu|4nCNbg@&7eAc&}u40*ymXtJWor5Y7I&xucLlv>!hSHEV#7~C!H*!MOw9+p#6fq&)CTBd3U%$C=@)Ned4^wzWY4GzS*BOR2EXuIjwjlW z`x?#BJ3psjvOpqHF9b&%Mnj-US&f{AM`&tA5dUOTv$y>Vzfh^%q)pz;{gnN?qdY=+ z!p~_Xzdz)JAgMEegN~=O_Q0j-$6Q?u#DoXK%iB`E-i}AnT!}JKuq|rLWfeW|NtVomHx>L4XblwO3WOAVo~)%Kqb0 zcEK_`AO1o((6YkCNPr${-M@bL7i_rCgx}>^JO0)R)e=3Udpm%pr{w?Kn+|1; zDdsP$G>Ufw7sRgoH_7L8y6j||AE|i`+DAGr!A?~r^@a+2r|S~+g4(qHbOX3HmPI)4 zC+#qN3~6+mi~de}Gh9fG>+VLrRiCDit@gvuL|-QBnqM_Bw=O$*0Tq>Wuq~AY-+fyC zmR8hM!W__l%dPjj9nJ3hZL{YjE{jcS7gQBm_%A z2XbUqbIZhESJ(<(CiTd9#B_sP8~g<21f;K(A6cD+0k_tEuV&tdPBK?o_VOnuWFSHM zh6FHP?`GzBYvOwBT%eXrmv(i*ZLdzr6j~|t{@EBA1iG3yAYlqW6tFnWX?lC4R^?3e z9jHoWQmZ+H^tAP)rDwbhj8tPo`njZ5zvPU#hUBq#F*%C+9fso<4WT{pXGV&x=!(2Q zP%oYp&&3MngaDeQB8CAwBwgJ3slXxMb7|Uc<_sqLx@%!VFiOby1jMV0FE7xR-e$-_=?0~&6J6BZvXx5|J?uiJ>gpzUuQf6bm?^f6oD8b1Ulln;TzFl9 z(p~T1hY^|J8$j14@jV4onL7Cw9Unn9PEDz<=Z;4$taw^D3zJtTS^0B9kRG68{0Ae&>WlS zkwHx&OpbAcu(pjyOWqcsOfnlqQcy(Lyr@_vi#!Q-hZ{Z>=;yMQz?Rd^U=y)lpo4O^zLzR4NQI4{qx2N>;;W3`Uu#vT8t* 
zjvS+v)vll)C?YwrshzY0sXQe}T8dL%%chFtH4!MC0OtK^MDr}fOv}eS@7n>_hOatE zQ)O#T)mpV7JKo>*89}rNm)GnSEcGB|a28{pWxV{1wb~>$Hm=~dBpG9V@Tc8tH)>J7 z*4TIW6twx1319xCo@qE`HZpairYQ0vtJ^0uz%97Z9s727LsZXZ*_vG}~58LmdzWjX{u?*B}SkfBdF!LuqRd^FM zTDA3QuarhrG31rloyRuq17ULd%iKs6t#Hpb4O zjWoFAuxJbx0Ia~{*Qjv#k!oBfXAUL9mc}U=3kBqFsp)ddZn)4^0(M36RqlPJBkiv+ zK*KeP#px7qzatmm2BGSOI}MhtsSvWi^7Y^X8!s~WgB0h|o74Uv zm4XoZ9%u~!cY^#Y0SZU36F;*Y|9xRh8!oi%u$N>(dbw&*T3_Hu(UTw5`M$M@5)8aZ za%#0w5S?mnCCLn!0wbeVh8I_gGx02Mh;a{xqjzs>CTzcVG}RK)t@INeRJV*l{Y< z`7EE~M&e(zgDjhi&v2yUXtb*;6{}JmcAS?U!C<%@B@zs=`=lKYko`v-la-XX#n*t# z1k`0I&q5g@A-v$evmjDs4NvfUVxYuVlpWW&{@8~1MWv69mINNNL$|N;g8O_5%vhNA z3i5H!8L-qG^+bwg^~ETJz-~klkV{4|h!l1WUc(cdO=k;KAp?jO4_ z44v#udtqHws_YG@1%YlFF2-u+AUY$pQ4a?+_)Crxq8QvEJW}MUGhl;2>vd^Z;K?`# zbE&fy-^BBX*N#MWMH&;YOfc#H31MqT0swL4CczaBv{>2nT!Tuq%a6`0#GMh^rO#L7 zvkQfi3AtAH%MV$#=zOk`l~)VO3JM*@o(mBZs)~}sN7{OgKFCvL3qgj8&W|8XN7sZ0 zr_+Xqt1GH2QCkL9wwYnQgxUgHndrRE_<0CqZjoN6$HL}v?LMYA#6K{K-tlWehvjGF z*)hjsim@8{IQT1QI}L$vKVk7;c{i-JyS}{1y0YY0Kem+Msl~$3boVQ_78al9k7;Yn zq0rh@(^-qRCFT+6AdJh6s>V|ksA#u0!1ce}fkS)aV4A0*{oH>`091gA&9>4-KCTkW zn;!~g7b7s7geMEunM$ia9St!LqRU_2>#Y=j_!76vvVw=qTW50|1sgI$%aGA;V=~dfIXyNpuVXLpG?!#|xt4;}mai?{K~{ zNDEg7$d_pO=s9xF4hEHIYL9OZ)t6QZTBCpp=9?myInhf+sXz-HPDWh=(7ww8NY|j( zsUD|_30TZl-K)7?_wWlHnte}RPs#M+@U6x2@w46EVI>h5^9z9|8|r|dEI`!T6bJ4A zFBKN_?5~5aX6#sv)m@DT^?mbd3#%^XVspo}sQ&Dr&LPgx6gvvZ3$ETpC-8nnqr3hm zqi=CEV~x+~`!lEV{jQU6qoVP~nMdlbJ<|^-L zLEEZPb69g*no3oXavGv2M9*h0og~Z42uXx zMt`#$Ym`j6|RKf}5gK@N7=e{o-@$}QjUHsk_vTRULYI4z?MA&y3 zk{)U(_oTqmyE9#M!}i9`B}x>ZZi}j|V!!U0Vt=7eD>e>u*j^TlD0JA0y1Cscx+-Zr zYqwy_adj=}Tz@B*9g*a|pr$oN|CkSfNo9{dU_#_F1)qkp(%F9#{LC)(Azm1Ug_M}q zB;CFmhsk*C^+i0c?#C(s#m*i2uB6=HR+!XeSLJjS?xv5@457ol}YxGRNkeiZ)W zMhi|Tz9&0AC1jj8X2AP(bVMbZmS+HmC5)?2Z}Pnuo6jZ34Y;Mp2}p-ap^ZUh{YLS0 zJv_QA{^te2mpjabQxIERg4y+Q zcCpW}3RNOC@!eU&R|qP>#MQXlf2F-fx7#+h?m8P@>ncW&v_-O4l+?I)gPa%kdO`Mb zJo|Lh>i#;AEI^WLNQ=Rl#xP1BZ1_xwuA7&v=BjNS#xRG-)}rx`nxQM&>BWx?B2x6$ zHc=!>lap+*Yo|RGZdo1;+?mgHl6gurieGF1fsk7 
z80CoRrG#vHvos8CExoEiIy=Fy^?1OI=nIbeP(|`iJo^#NlfxIuAG1l+;vlE=_W`jn zm2w|AHiiq+E1yP17?Q_vBkSW^b5kbi-qz zYd-LZ%lDmKKTTj{-At}jN4G8ewg{>X^ttRE(PfD2mTF`ok&tR|M!^}?9TxL>`l9Eu z#PdHhQxbP+M<^_=@0RBlB47*#@O_0?%t96I7e;fRYHzV{36(C$1ulsqzIBklHO$+m zI&7xVQFzETG8{20oWw}j>U(($jSjCN>Zw;qFSux5tdqzi7X8yjt8Q%b<-&y8)=vb? zd{A%t$Zizz)?!J4M*A5$s<*b6o3hCh9W+^dz1TGBS>}Gz%)K>YDIuEmBqa-q{ukJC zckchBBb@8y=VPLRQ@jcM@|Me+BF_nSkBmM-pCItRtp#t*&ESyXJ4pSyk&UVrt$mRb zhEGv5SRo+e2x#HECVs??EM}N8k;0vticmuEqR9I+Xhp*`JSe{Auv(Fd)5O(Igfkfop` zIcX+Z$&N}s_G}XSP0_b4`XezrScfosrh~u>!FxF`KPMoV6hS&sFke2T5mImqW?r!+z*5~)W&vVXu zzQ4{t-?#VWwJzCf$C_)7G4FAYdyL8DzEd@iLUq)T_c=K2FLX@Sq9-TlP;ioJ@C%^9 z>&guTqtv8Z;t^K#Fej(I82H9YCBB1xsO?Rcrh__!tI4CyIeKE?y5r|HGj3 zb0iTeB|z~c9|kcvz9FFTUwC_SxayQT`6vvXlIEOhc)@^D$Po!S}@}_bz@!c>!XTo&l7r`sZ$-P3w@U?c_R#T zlf*d!Uh1|ezURZHjJvKEHR!77xAOF-ccoR-1Ywz+_7wJkdr!oNgdRH7B@Sg4Xm}m} zpR(X@Aa;nrs}}JBYbNYh?UTqYsb|l#rbsfkU-hYZe`bKrE`0mr4!yNLH97L z5tW~)J3ief4a6J9c5!gj<3We`0fdqu(h(>(OTB~9zATgJ^Gx#TFt+L__S~4?A zTZ;B7$r$#HKCNlKK#8mRX^ac|$E4u*J_n(KC;#TNLWa&o$M&}L7-Srl;M^SUCyjiH zH!y@9Xb*x5h%8k@C%w})QTU1l`&1DKZ>8gS@F0WlW~mZ8fk^1xc7pg%eY$=#*IE4a z&OnZr0>O>9d-E)un9uI(t9dO>+ni>E*|9H8T9uoPqn@A?wi{C>6Fm*o&jV|Y z`(m`Z>(-xDJv66eGj%1c7I#&bJ(o-Mm~yxg0DK?`5jh`L$faqr0yWYG3<3T>1(k92 zg(vj`%~DKTDU_O=f@l$0KT|8w@!(7YuKcZ5y=b;D?z)m>K~ATCRMZhvG@#)a#)t^d zfm951>=P-u{H*5cFGtF$J^u<3K+gi$NR!taMOW{1g?!7WvS!^93B`{B zE*}9xOc0}!zkRP|M?GjGCm%vl$YAse^iY=)1%#p=Kw#uYlJ4JFFKeV>F}zdQ6;1%3 z?iVjW8>E>Uc;${lQls;7qlWs04;sto8h0F(SLmfZBbK-6VxqNIBFI1y;AQ{lLrNUa zW$*S8B)e|^uYA$|Lc;7(f8yQWTSuqs`wx?{#sTMTB=>XlRPr#mt|iEkQ88z#@qu;m z06^I=#@#s-D7MaW7pQ$$Y85Q`6Wu?T8SvUd zbot!ils1) z`KTcWjQu2_M#hgkB=#H?gz&}762N7?>u1W~P& zckffelfgy(v{X=2@B!rPj~miZKkh+swlB2-%e@~PU z&s}psOgv0)?G~pU%~U~CKzE>~BLbpMo5>>t4DfevY(Kx$M>Vc@8VL^Fov$cX#B?cL z$pbO|6c#pCNztL;EAK|lSe-(#EWzahlJvvxk>YoQFVvV2cqiZrzyk58SX3wzOQM~77n7X*2g+l8Rmv5<``**1;(_aGV z6tG$Y_HgJ^rHjDv>c+Nf&p!hqZvOwKgrx=iH&oKRYNpUEu)3kHB;jjCgwZ=o%ZIh~ zxi+Z^MeEw_d_}bnzyFRS_v;Mo^w$pufuD}GglOZXWjM|9r_W%pGB%dRfoYmfm(i!M 
zI~8=uJ{nrX084p%1o0wy~F7i~}U?}F0b(aF=FbvWwTK-7?-bzUT;&b@9B*6rn ztTvWoK5u_`7(GIM6y$npTO`YcN^j#fRY6nw^I@^qE7QV11dK0z3YdkdF+Xp}yXEK! zYJVz-HtBA`99e$64>TOKmL6>o@hB%#FI!3dar?I}Wj5Xi@m z%gpvWT&~A`_K;VSfq1R%%xD7)*)7SRwpoB;FTuK-R=`4A3xPEzG9j5_9MD!s31zzh z;uXk#f8vp^z|{h}r{`0m^$|jpQkqQ8o9EcZGn6+E<1QGZf!=dhFRX!cVHaH(kX|sQ zA@GlpSmSKUgl|5dS9WF3TgLxCAmPadIs~gx&wpHgbtdR@KQhOJ-6R4?i>|W zz!WgQL$Y z1EfUzcHA)Ihsm&PcDkOG;^+5<@AP|vM!Te8Js8Qg2~O&%r9`!(1HNxncveu$P1%iq z5~siijtJq#Rd^`1DhC4OC>Rmzrhj%wO=57}U_m40gTkc5n-xrrrk&B+6;k)wXNK%K z9*xAHOuHTI6@P7}=c9(+XWZEqBUBr<*h@cLWHdbYW5+>fRpjC%c=CCU7~iRIwA!Mr^^V;wiqY;v6C&dJJZRL%R2 z59Yv9;jrj4fRBflRU0y#d|*;cm>w(4+%Nwj$# z_8>sMVMPpKHvYQpi{hq4-|OO{!nDY^PoJe zNZ=*`ul!XF8t~=&*88XtcL!$?MlhT}VMzpDgRva*Zs#QZ)mC*c8x!p1A938B4SyvS zs_Hy0#Z>|-Qjy=n<@MG#nTg_!rU^acMIb{iVpGaj;qa)^GR%l!QH3sOnLrap4G2FV zcTUGsT0_)O;s?NS*G`?JJGElm0Z``Ols?K8{<3r2n5Vqh8|?p3T_N)+%|p3s@v0Rd6iYTpOO| z6z~jhy#z}_$c8X*Ss7Kb&7djCkvzy=mq=Y9jW%H4>a6D0e~zGj1ssH0dEY14UY<{o zowV&Jb9ye`w6O^vzIw0m=!Ez)%#O8z)O)YySS!I@jSwghTc$>ditJAjVX`C0f;Z-T z>s3eNQu8IQ%IW&K<724Z(w2Fw1n#!wmH;?^(lcjV4wQHGN}@p zsN1nyy7%UFvu*I(kXJ69>nc|>x)Y`xH)Sh^GaTFf?qP+F`T|qD0?Mv}K*$4vmP=At z1))Dot3SA;i2^*RbVC??Loas($@**n4QVktK)48be<*gtSF6(8pJA;#7e5T1>jpD& z&(ywtQj{Gn+gkSVi{oV!9p6RIb$CpS53kWYDnv!K5?)!;CH7>!`OA4qVIk&pg&BHx z16tV>tWCN+bC-G@{ZObxS(MR?&`|#mPGF@Wu<@<@Gi%`@l%ruTFAx!^S&Wfl7@jl9 z%RDjf>Y`OSj^??!9mX@AZ$PKtR$AyiL5mKn-|QezIlud)a-A2r?`J~~Pn8b)5(q~>*b@<9s}K-&3a2B>Yr@8khEPSCgCZR*j}9c~8Ks;Q+Kg4|v2(ZP z!tatD4l^c3>r3aC`}x>#))Nv#YOWj}I13aL)>h>-EuH~y4Wt{A_xnTVy1=>TQGh*7 zgt3IOFQw#RGsHiY2v9Qq;y~Hkx6hg>4cpH<)sjGb2mY00ytg!X-gKMO^TM!-s{SyvI7mZ}+xF6QJD(1B7dxT}kk#vs!!Q)uM(qMN5 zpWkt*?p=*AW6<{XbwPqjg@F33Q7P0~kwYYI+GzHRm)9mVh4AxP=s@vFb$ij|4i2r6 zf#Kp_iBk--JNen(Y!E2C@f5tG<#>Qu-j_W5KPwG(E$1)TsFu8rJ^Xol z%ulOAus*TBOq;Egohl#lL0va&9&vvRVP5oGw(PaFCQ*|76nJ7Wy0KUkiwTQywQsCy zTip)Zh8@3oaD4~KHsI4>nYX|0o^IZqC9Z4v4uHKzk0eD{11eplKLgGJ7#f&lbMg8i ztmLE;KM9CA0Y%;yB=dwNQ;P?EI1_hiRXJx+Z>KpSIeNK&@_ea1I_UwVx8asFrqA%^ 
zs+#gwzW^pogAXLPhw~OSUlS4nFIRD4#Y%%iL%!8Eq)l!%_zSTaK@iBLLYB_e2{~W! zPx!wb^%+`{ENY9->PHv(^vP4JjQ#-O0KjDUP4l-i3x{u<4F5h5Z&y$`8_S)zsSLZG zy)Pw_94afuayfSjt-sDwia9@P(SKR(?uo_K!;C>2AqEc-4OQmm8(UltZVI2Dd<*D} zLcwbb4drMn-lP@I(2T#?NH9PWDXFrGm$)>2Yy~ypFBQH<=Gutp*`ZcvQ7Y_ZL6H)EC?J~@7 zp52=ZiHcG|7XC}bA6Mj%OaPPdGu{lfi;Ihw&3m+usub&TSGeEeHcS?i^vt`HMC0F4 zFmwXzpzfNc4frk%ybP%xG*vtTR#C%W{lr5{HK?)SKHB{}D^UQ?saoNnzqUM(4}^Ki&f?VfuqdS4!enD#xQ=M-B&To>vfqKVBS}v!UDwq9gpxb`ok9XX%b&+#?sO&w; zi*n_sRVHWklKTR{?KOQWgDGD@_ybeV0{4-TPT&wm9ry%2S6NqamiPDlF=NA%+8W>k z=C4liW=={820zKQ<3`^BnntFE`L+WaYsnMKuRK0#HQi+1~ctU)OF$(ma8c80vKxTaE9y6oX|fPqQ8l zJy+oC-u)zc#zfhr9yEkMkWU)~m!A9|5j{KGKxJb`MJ~)I6ax>RGWyX~Hu5~{mJc{Y z5dk1Che>RMiDUi0;GQpMC?|E6-B+Ux-(|h6xrf!&wBzqq-S)>1ff|jl`+01D9v?!| z@enLqQH9pS{bh1>JO8!_XT3QsC z7~f3gnn7IhH!S)EQ>qPk#EfeU;)$?kZ@;G>M>YnD0e$X7LDeN?5(p%ofYCzC57H-) z4OO6U-dtU`8ZD3JzM*Znd$f6%gAbZ$p$^cTx#C6ehXO~&+{v7yqtIM9LFE5uU}EZMY9 z(d6e#QU?-Jr9!9=AQ!t*Qiq~;FR-z(b&82CjddUp*tF-=@IW1Fs8ri*eM+7R`K25N zP#1Y2gV$82kYRu;& zpNvJ%8^xvW7NLltdQh%VorO2-zV|np)JOMBNb%9Ouh`YT2Iu?^pfKX9Pn&fOW|uF3U4nFKL~axidwY>-T4E87<-tP|?7m}w3Km{I_a0?ueM7^g zi0_8!Sw>*%=9^mE2lJYK}`JAv-4wj7JOWd@ZN~%Euge1Qs4=2WuA2> zHO5`(Kx`J2%>;hN$oytj#$tG2R?$wra`B}F89?YZ{ncEbUpv}dnm{0F#YkkF9zMs` z?*3pLn}K@<$Q8-M?Vbs;6J_}OcZcqJzXb&1o5UZJ;Fc8W8o_qydU67t>0tT?&|AY( zy(MmKvO;^%$%3XK-Z6+g(!cTjuKrlyx)BIGVTfGF35T^KQ>Sx%R{okdWb8Kcm)>AJ ziIQR#kiuouya;vygC+dZ5@19a^j~9nK^1R{@iYreV6AzA%zD5AlV{K-F+2x+w8Qd2 z_;Zy;cM`~l_qy15ND)(mbH-V?qr`i*>ezP4>)DeVsR^dd)R~rcoN|! 
zqu^e{_=u`LAcDvp{!zp_zG(F)=zPNK^_{_HcXWKY#A`F=e$WlJu^FUExBMyhWU)qh zefu~5*S;^%$ZJ;KhJ8(u$Hc95s&+2IYz~(+Y4VDbGYa`U76VE}F>e8v`4q1)X_=6V=^* z@t2LgIoesU0S}`FOZ~6KVpXHfbj3-a$f@S?{k;6g?y8EyQm~krSoJ_0U~ezFzcpMG z_8bB1P9zu>t>r$qdz)N&i&NRx03`AMQ-Ke8>H$bFs5GW4()Gy-hXUFRlv~2W3lUWq zX#EMj2k5-Ls9DnDZL}T7Ui>u#@Pq;V`Rc@b*Xagqp(MP4yymq;pialKM~02XTA!T5 z_2e1E1=SmPFkQ7f188D;a~=2bbvbj@>UY&&T*nN-f_miY(8pLJ&vJ*x3g=c29|%E9W?=8D|K&lB3&Vf^`Jgdp{sYGUUR?j~3wH zKmPyDr~m)s6Wd%%K;_npncByv|78?hp((6fp-<>s^AY~FH2ryr&U;zBVaY@UCTO($ zH+3T+O@n=8YwxEgGSmla zNPqFyt^xpM zb|>~|CM<2$%$2bK;eWujv_+DCE5&b83<>sXT&Tbfl)l$%jL2Oqi1As=vXYW-4B8`%RZ1Tj2qp)R z00f4B`JU@xLIY@L(>XX?oS&z@ctGfCx#+gBz1>=2rhaYKIlN79z}GO;KU|#XFc